author    Ryan Dahl <ry@tinyclouds.org>  2010-01-19 15:45:36 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2010-01-19 15:45:36 -0800
commit    e60d653a58a568017d6f69aeaed2e3a765a98104 (patch)
tree      817723cbba19468ad6f3a4ab7c48b3513c2078e9
parent    bfd31448617dc4d66f6de5ced7c260562e01349f (diff)
download  node-e60d653a58a568017d6f69aeaed2e3a765a98104.tar.gz
Upgrade V8 to 2.0.6.1
-rw-r--r--  deps/v8/ChangeLog | 16
-rw-r--r--  deps/v8/SConstruct | 2
-rw-r--r--  deps/v8/include/v8-debug.h | 5
-rw-r--r--  deps/v8/include/v8.h | 1
-rw-r--r--  deps/v8/src/api.cc | 4
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 16
-rw-r--r--  deps/v8/src/arm/codegen-arm-inl.h | 10
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 793
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 18
-rw-r--r--  deps/v8/src/arm/cpu-arm.cc | 10
-rw-r--r--  deps/v8/src/arm/fast-codegen-arm.cc | 672
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 9
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 6
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 6
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 105
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.h | 3
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 10
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.cc | 30
-rw-r--r--  deps/v8/src/arm/virtual-frame-arm.h | 8
-rw-r--r--  deps/v8/src/array.js | 82
-rw-r--r--  deps/v8/src/assembler.cc | 13
-rw-r--r--  deps/v8/src/assembler.h | 5
-rw-r--r--  deps/v8/src/ast.cc | 2
-rw-r--r--  deps/v8/src/ast.h | 48
-rw-r--r--  deps/v8/src/bootstrapper.cc | 1
-rw-r--r--  deps/v8/src/builtins.cc | 5
-rw-r--r--  deps/v8/src/builtins.h | 4
-rw-r--r--  deps/v8/src/code-stubs.h | 5
-rw-r--r--  deps/v8/src/codegen.cc | 22
-rw-r--r--  deps/v8/src/codegen.h | 60
-rw-r--r--  deps/v8/src/compiler.cc | 60
-rw-r--r--  deps/v8/src/contexts.h | 2
-rw-r--r--  deps/v8/src/date-delay.js | 171
-rw-r--r--  deps/v8/src/debug.cc | 19
-rw-r--r--  deps/v8/src/debug.h | 3
-rw-r--r--  deps/v8/src/fast-codegen.cc | 203
-rw-r--r--  deps/v8/src/fast-codegen.h | 55
-rw-r--r--  deps/v8/src/flag-definitions.h | 7
-rw-r--r--  deps/v8/src/frames.cc | 4
-rw-r--r--  deps/v8/src/heap-inl.h | 26
-rw-r--r--  deps/v8/src/heap.cc | 294
-rw-r--r--  deps/v8/src/heap.h | 53
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 73
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 10
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 15
-rw-r--r--  deps/v8/src/ia32/codegen-ia32-inl.h | 10
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 1933
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 83
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 59
-rw-r--r--  deps/v8/src/ia32/fast-codegen-ia32.cc | 671
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 43
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 35
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 25
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 153
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/simulator-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 22
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.cc | 28
-rw-r--r--  deps/v8/src/ia32/virtual-frame-ia32.h | 5
-rw-r--r--  deps/v8/src/ic.cc | 4
-rw-r--r--  deps/v8/src/ic.h | 4
-rw-r--r--  deps/v8/src/jsregexp.cc | 720
-rw-r--r--  deps/v8/src/jsregexp.h | 183
-rw-r--r--  deps/v8/src/jump-target.h | 3
-rw-r--r--  deps/v8/src/macros.py | 13
-rw-r--r--  deps/v8/src/mark-compact.cc | 245
-rw-r--r--  deps/v8/src/mark-compact.h | 10
-rw-r--r--  deps/v8/src/math.js | 8
-rw-r--r--  deps/v8/src/messages.js | 4
-rw-r--r--  deps/v8/src/mksnapshot.cc | 1
-rw-r--r--  deps/v8/src/objects-debug.cc | 1
-rw-r--r--  deps/v8/src/objects-inl.h | 15
-rw-r--r--  deps/v8/src/objects.cc | 71
-rw-r--r--  deps/v8/src/objects.h | 151
-rw-r--r--  deps/v8/src/parser.cc | 27
-rw-r--r--  deps/v8/src/parser.h | 1
-rw-r--r--  deps/v8/src/regexp-delay.js | 12
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.cc | 9
-rw-r--r--  deps/v8/src/regexp-macro-assembler-tracer.h | 2
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 17
-rw-r--r--  deps/v8/src/regexp-macro-assembler.h | 2
-rw-r--r--  deps/v8/src/regexp-stack.h | 12
-rw-r--r--  deps/v8/src/runtime.cc | 318
-rw-r--r--  deps/v8/src/runtime.h | 7
-rw-r--r--  deps/v8/src/runtime.js | 50
-rw-r--r--  deps/v8/src/scopes.cc | 2
-rw-r--r--  deps/v8/src/serialize.cc | 32
-rw-r--r--  deps/v8/src/serialize.h | 21
-rw-r--r--  deps/v8/src/spaces.cc | 80
-rw-r--r--  deps/v8/src/spaces.h | 113
-rw-r--r--  deps/v8/src/string.js | 76
-rw-r--r--  deps/v8/src/utils.cc | 1
-rw-r--r--  deps/v8/src/v8-counters.h | 8
-rw-r--r--  deps/v8/src/v8natives.js | 66
-rw-r--r--  deps/v8/src/version.cc | 4
-rw-r--r--  deps/v8/src/x64/codegen-x64-inl.h | 10
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 591
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 15
-rw-r--r--  deps/v8/src/x64/fast-codegen-x64.cc | 723
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 11
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 2
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 7
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 153
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.h | 5
-rw-r--r--  deps/v8/src/x64/simulator-x64.h | 6
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 12
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.cc | 22
-rw-r--r--  deps/v8/src/x64/virtual-frame-x64.h | 3
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 21
-rw-r--r--  deps/v8/test/cctest/test-disasm-ia32.cc | 13
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 106
-rw-r--r--  deps/v8/test/cctest/test-serialize.cc | 145
-rw-r--r--  deps/v8/test/es5conform/es5conform.status | 164
-rw-r--r--  deps/v8/test/mjsunit/bit-not.js | 75
-rw-r--r--  deps/v8/test/mjsunit/bitwise-operations-undefined.js | 49
-rw-r--r--  deps/v8/test/mjsunit/compare-character.js | 50
-rw-r--r--  deps/v8/test/mjsunit/compare-nan.js | 22
-rw-r--r--  deps/v8/test/mjsunit/compiler/countoperation.js | 111
-rw-r--r--  deps/v8/test/mjsunit/eval.js | 35
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives.js | 8
-rw-r--r--  deps/v8/test/mjsunit/get-own-property-descriptor.js | 51
-rw-r--r--  deps/v8/test/mjsunit/get-prototype-of.js | 68
-rw-r--r--  deps/v8/test/mjsunit/json.js | 6
-rw-r--r--  deps/v8/test/mjsunit/mirror-date.js | 12
-rw-r--r--  deps/v8/test/mjsunit/smi-ops.js | 27
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 12
-rw-r--r--  deps/v8/tools/jsmin.py | 4
-rwxr-xr-x  deps/v8/tools/presubmit.py | 2
128 files changed, 7605 insertions, 3274 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index f1b5453ef..192dd2500 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,19 @@
+2010-01-14: Version 2.0.6
+
+ Added ES5 Object.getPrototypeOf, GetOwnPropertyDescriptor,
+ GetOwnProperty, FromPropertyDescriptor.
+
+ Fixed Mac x64 build errors.
+
+ Improved performance of some math and string operations.
+
+ Improved performance of some regexp operations.
+
+ Improved performance of context creation.
+
+ Improved performance of hash tables.
+
+
2009-12-18: Version 2.0.5
Extended to upper limit of map space to allow for 7 times as many map
diff --git a/deps/v8/SConstruct b/deps/v8/SConstruct
index 00bc4efd3..97189073b 100644
--- a/deps/v8/SConstruct
+++ b/deps/v8/SConstruct
@@ -252,6 +252,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
+ '-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@@ -948,6 +949,7 @@ def BuildSpecific(env, mode, env_overrides):
d8_env = Environment()
d8_env.Replace(**context.flags['d8'])
+ context.ApplyEnvOverrides(d8_env)
shell = d8_env.Program('d8' + suffix, object_files + shell_files)
context.d8_targets.append(shell)
diff --git a/deps/v8/include/v8-debug.h b/deps/v8/include/v8-debug.h
index b27bacc10..10b41e236 100644
--- a/deps/v8/include/v8-debug.h
+++ b/deps/v8/include/v8-debug.h
@@ -258,8 +258,11 @@ class EXPORT Debug {
* supplied TCP/IP port for remote debugger connection.
* \param name the name of the embedding application
* \param port the TCP/IP port to listen on
+ * \param wait_for_connection whether V8 should pause on a first statement
+ * allowing remote debugger to connect before anything interesting happened
*/
- static bool EnableAgent(const char* name, int port);
+ static bool EnableAgent(const char* name, int port,
+ bool wait_for_connection = false);
};
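
Note: a minimal embedder-side sketch of the extended API; the application
name and port number below are illustrative, not taken from this commit,
and the call assumes a build with ENABLE_DEBUGGER_SUPPORT:

    #include <v8-debug.h>

    int main() {
      // Start the debug agent and pause on the first statement until a
      // remote debugger attaches (the new third argument; it defaults to
      // false, preserving the old two-argument behaviour).
      if (!v8::Debug::EnableAgent("sample-embedder", 5858, true)) {
        return 1;  // agent could not be started
      }
      // ... create contexts and run scripts as usual ...
      return 0;
    }
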
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 2e30992ed..6125286e8 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -503,6 +503,7 @@ class V8EXPORT ScriptData { // NOLINT
virtual int Length() = 0;
virtual unsigned* Data() = 0;
+ virtual bool HasError() = 0;
};
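
Note: embedders that subclass ScriptData must now implement the new pure
virtual as well; a hypothetical minimal implementation:

    #include <v8.h>

    // Hypothetical embedder-side ScriptData wrapping pre-parse data; the
    // HasError() override is the member added by this commit.
    class SampleScriptData : public v8::ScriptData {
     public:
      SampleScriptData(unsigned* data, int length, bool has_error)
          : data_(data), length_(length), has_error_(has_error) {}
      virtual int Length() { return length_; }
      virtual unsigned* Data() { return data_; }
      virtual bool HasError() { return has_error_; }
     private:
      unsigned* data_;
      int length_;
      bool has_error_;
    };
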
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index d793b9f11..ab5d0a560 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -3741,8 +3741,8 @@ Local<Value> Debug::GetMirror(v8::Handle<v8::Value> obj) {
}
-bool Debug::EnableAgent(const char* name, int port) {
- return i::Debugger::StartAgent(name, port);
+bool Debug::EnableAgent(const char* name, int port, bool wait_for_connection) {
+ return i::Debugger::StartAgent(name, port, wait_for_connection);
}
#endif // ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 5f47cb796..fd2fcd305 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -229,14 +229,24 @@ void Assembler::emit(Instr x) {
Address Assembler::target_address_address_at(Address pc) {
- Instr instr = Memory::int32_at(pc);
- // Verify that the instruction at pc is a ldr<cond> <Rd>, [pc +/- offset_12].
+ Address target_pc = pc;
+ Instr instr = Memory::int32_at(target_pc);
+ // If we have a bx instruction, the instruction before the bx is
+ // what we need to patch.
+ static const int32_t kBxInstMask = 0x0ffffff0;
+ static const int32_t kBxInstPattern = 0x012fff10;
+ if ((instr & kBxInstMask) == kBxInstPattern) {
+ target_pc -= kInstrSize;
+ instr = Memory::int32_at(target_pc);
+ }
+ // Verify that the instruction to patch is a
+ // ldr<cond> <Rd>, [pc +/- offset_12].
ASSERT((instr & 0x0f7f0000) == 0x051f0000);
int offset = instr & 0xfff; // offset_12 is unsigned
if ((instr & (1 << 23)) == 0) offset = -offset; // U bit defines offset sign
// Verify that the constant pool comes after the instruction referencing it.
ASSERT(offset >= -4);
- return pc + offset + 8;
+ return target_pc + offset + 8;
}
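
Note: the mask/pattern pair added here isolates the fixed bits of the ARM
bx encoding. Stand-alone, the new test reduces to this sketch, using the
constants from the patch:

    #include <stdint.h>

    static const int32_t kBxInstMask    = 0x0ffffff0;
    static const int32_t kBxInstPattern = 0x012fff10;

    // True if the 32-bit instruction word encodes bx<cond> <Rm>. The mask
    // ignores the condition field (bits 28-31) and the register field
    // (bits 0-3); every bit in between is fixed for bx.
    bool IsBxInstruction(int32_t instr) {
      return (instr & kBxInstMask) == kBxInstPattern;
    }
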
diff --git a/deps/v8/src/arm/codegen-arm-inl.h b/deps/v8/src/arm/codegen-arm-inl.h
index 749f32db0..17e18d9fd 100644
--- a/deps/v8/src/arm/codegen-arm-inl.h
+++ b/deps/v8/src/arm/codegen-arm-inl.h
@@ -67,16 +67,6 @@ void Reference::GetValueAndSpill() {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ b(cc, &entry_label_); }
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 89d974c73..70d8ab495 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -44,7 +44,8 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc);
+ Condition cc,
+ bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* rhs_not_nan,
Label* slow,
@@ -186,12 +187,18 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_is_shadowed_ = false;
VirtualFrame::SpilledScope spilled_scope;
- if (scope_->num_heap_slots() > 0) {
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
// Allocate local context.
// Get outer context and create a new context based on it.
__ ldr(r0, frame_->Function());
frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ frame_->CallStub(&stub, 1);
+ } else {
+ frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
#ifdef DEBUG
JumpTarget verified_true;
@@ -240,28 +247,35 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
// initialization because the arguments object may be stored in the
// context.
if (scope_->arguments() != NULL) {
- ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ allocate arguments object");
- { Reference shadow_ref(this, scope_->arguments_shadow());
- { Reference arguments_ref(this, scope_->arguments());
- ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
- __ ldr(r2, frame_->Function());
- // The receiver is below the arguments, the return address,
- // and the frame pointer on the stack.
- const int kReceiverDisplacement = 2 + scope_->num_parameters();
- __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
- __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
- frame_->Adjust(3);
- __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
- frame_->CallStub(&stub, 3);
- frame_->EmitPush(r0);
- arguments_ref.SetValue(NOT_CONST_INIT);
- }
- shadow_ref.SetValue(NOT_CONST_INIT);
- }
+ ASSERT(scope_->arguments_shadow() != NULL);
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
+ __ ldr(r2, frame_->Function());
+ // The receiver is below the arguments, the return address, and the
+ // frame pointer on the stack.
+ const int kReceiverDisplacement = 2 + scope_->num_parameters();
+ __ add(r1, fp, Operand(kReceiverDisplacement * kPointerSize));
+ __ mov(r0, Operand(Smi::FromInt(scope_->num_parameters())));
+ frame_->Adjust(3);
+ __ stm(db_w, sp, r0.bit() | r1.bit() | r2.bit());
+ frame_->CallStub(&stub, 3);
+ frame_->EmitPush(r0);
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
frame_->Drop(); // Value is no longer needed.
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ __ mov(ip, Operand(Factory::the_hole_value()));
+ frame_->EmitPush(ip);
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -613,15 +627,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
LoadAndSpill(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
LoadAndSpill(property->key());
@@ -1986,13 +1992,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->EmitPush(r0);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Here we make use of the convenient property that it doesn't matter
- // whether a value is immediately on top of or underneath a zero-sized
- // reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -2298,12 +2300,21 @@ void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
VirtualFrame::SpilledScope spilled_scope;
ASSERT(boilerplate->IsBoilerplate());
- // Create a new closure.
- frame_->EmitPush(cp);
__ mov(r0, Operand(boilerplate));
- frame_->EmitPush(r0);
- frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->EmitPush(r0);
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+ FastNewClosureStub stub;
+ frame_->EmitPush(r0);
+ frame_->CallStub(&stub, 1);
+ frame_->EmitPush(r0);
+ } else {
+ // Create a new closure.
+ frame_->EmitPush(cp);
+ frame_->EmitPush(r0);
+ frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->EmitPush(r0);
+ }
}
@@ -2444,6 +2455,87 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
}
+void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
+ ASSERT(slot != NULL);
+ if (slot->type() == Slot::LOOKUP) {
+ ASSERT(slot->var()->is_dynamic());
+
+ // For now, just do a runtime call.
+ frame_->EmitPush(cp);
+ __ mov(r0, Operand(slot->var()->name()));
+ frame_->EmitPush(r0);
+
+ if (init_state == CONST_INIT) {
+ // Same as the case for a normal store, but ignores attribute
+ // (e.g. READ_ONLY) of context slot so that we can initialize
+ // const properties (introduced via eval("const foo = (some
+ // expr);")). Also, uses the current function context instead of
+ // the top context.
+ //
+ // Note that we must declare the foo upon entry of eval(), via a
+ // context slot declaration, but we cannot initialize it at the
+ // same time, because the const declaration may be at the end of
+ // the eval code (sigh...) and the const variable may have been
+ // used before (where its value is 'undefined'). Thus, we can only
+ // do the initialization when we actually encounter the expression
+ // and when the expression operands are defined and valid, and
+ // thus we need the split into 2 operations: declaration of the
+ // context slot followed by initialization.
+ frame_->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kStoreContextSlot, 3);
+ }
+ // Storing a variable must keep the (new) value on the expression
+ // stack. This is necessary for compiling assignment expressions.
+ frame_->EmitPush(r0);
+
+ } else {
+ ASSERT(!slot->var()->is_dynamic());
+
+ JumpTarget exit;
+ if (init_state == CONST_INIT) {
+ ASSERT(slot->var()->mode() == Variable::CONST);
+ // Only the first const initialization must be executed (the slot
+ // still contains 'the hole' value). When the assignment is
+ // executed, the code is identical to a normal store (see below).
+ Comment cmnt(masm_, "[ Init const");
+ __ ldr(r2, SlotOperand(slot, r2));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(r2, ip);
+ exit.Branch(ne);
+ }
+
+ // We must execute the store. Storing a variable must keep the
+ // (new) value on the stack. This is necessary for compiling
+ // assignment expressions.
+ //
+ // Note: We will reach here even with slot->var()->mode() ==
+ // Variable::CONST because of const declarations which will
+ // initialize consts to 'the hole' value and by doing so, end up
+ // calling this code. r2 may be loaded with context; used below in
+ // RecordWrite.
+ frame_->EmitPop(r0);
+ __ str(r0, SlotOperand(slot, r2));
+ frame_->EmitPush(r0);
+ if (slot->type() == Slot::CONTEXT) {
+ // Skip write barrier if the written value is a smi.
+ __ tst(r0, Operand(kSmiTagMask));
+ exit.Branch(eq);
+ // r2 is loaded with context when calling SlotOperand above.
+ int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
+ __ mov(r3, Operand(offset));
+ __ RecordWrite(r2, r3, r1);
+ }
+ // If we definitely did not jump over the assignment, we do not need
+ // to bind the exit label. Doing so can defeat peephole
+ // optimization.
+ if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
+ exit.Bind();
+ }
+ }
+}
+
+
void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@@ -2601,42 +2693,6 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
}
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateObjectLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- explicit DeferredObjectLiteral(ObjectLiteral* node) : node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- virtual void Generate();
-
- private:
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Argument is passed in r1.
-
- // If the entry is undefined we call the runtime system to compute
- // the literal.
- // Literal array (0).
- __ push(r1);
- // Literal index (1).
- __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- __ push(r0);
- // Constant properties (2).
- __ mov(r0, Operand(node_->constant_properties()));
- __ push(r0);
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- __ mov(r2, Operand(r0));
- // Result is returned in r2.
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2644,39 +2700,22 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ObjectLiteral");
- DeferredObjectLiteral* deferred = new DeferredObjectLiteral(node);
-
- // Retrieve the literal array and check the allocated entry.
-
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, Operand(ip));
- deferred->Branch(eq);
- deferred->BindExit();
-
- // Push the object literal boilerplate.
- frame_->EmitPush(r2);
-
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ __ ldr(r2, frame_->Function());
+ // Literal array.
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ // Literal index.
+ __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+ // Constant properties.
+ __ mov(r0, Operand(node->constant_properties()));
+ frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ if (node->depth() > 1) {
+ frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
- // r0: cloned object literal
+ // r0: created object literal
for (int i = 0; i < node->properties()->length(); i++) {
ObjectLiteral::Property* property = node->properties()->at(i);
@@ -2724,42 +2763,6 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
-// This deferred code stub will be used for creating the boilerplate
-// by calling Runtime_CreateArrayLiteralBoilerplate.
-// Each created boilerplate is stored in the JSFunction and they are
-// therefore context dependent.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- explicit DeferredArrayLiteral(ArrayLiteral* node) : node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- virtual void Generate();
-
- private:
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Argument is passed in r1.
-
- // If the entry is undefined we call the runtime system to computed
- // the literal.
- // Literal array (0).
- __ push(r1);
- // Literal index (1).
- __ mov(r0, Operand(Smi::FromInt(node_->literal_index())));
- __ push(r0);
- // Constant properties (2).
- __ mov(r0, Operand(node_->literals()));
- __ push(r0);
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- __ mov(r2, Operand(r0));
- // Result is returned in r2.
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
#ifdef DEBUG
int original_height = frame_->height();
@@ -2767,39 +2770,22 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
VirtualFrame::SpilledScope spilled_scope;
Comment cmnt(masm_, "[ ArrayLiteral");
- DeferredArrayLiteral* deferred = new DeferredArrayLiteral(node);
-
- // Retrieve the literal array and check the allocated entry.
-
// Load the function of this activation.
- __ ldr(r1, frame_->Function());
-
- // Load the literals array of the function.
- __ ldr(r1, FieldMemOperand(r1, JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ ldr(r2, FieldMemOperand(r1, literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r2, Operand(ip));
- deferred->Branch(eq);
- deferred->BindExit();
-
- // Push the object literal boilerplate.
- frame_->EmitPush(r2);
-
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ __ ldr(r2, frame_->Function());
+ // Literals array.
+ __ ldr(r2, FieldMemOperand(r2, JSFunction::kLiteralsOffset));
+ // Literal index.
+ __ mov(r1, Operand(Smi::FromInt(node->literal_index())));
+ // Constant elements.
+ __ mov(r0, Operand(node->constant_elements()));
+ frame_->EmitPushMultiple(3, r2.bit() | r1.bit() | r0.bit());
+ if (node->depth() > 1) {
+ frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
- frame_->CallRuntime(clone_function_id, 1);
frame_->EmitPush(r0); // save the result
- // r0: cloned object literal
+ // r0: created object literal
// Generate code to set the elements in the array that are not
// literals.
@@ -2998,13 +2984,15 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->EmitPush(r2);
}
+ // Push the receiver.
+ __ ldr(r1, frame_->Receiver());
+ frame_->EmitPush(r1);
+
// Resolve the call.
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
// Touch up stack with the right values for the function and the receiver.
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize));
- __ str(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kHeaderSize + kPointerSize));
+ __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
__ str(r1, MemOperand(sp, arg_count * kPointerSize));
// Call the function.
@@ -3544,28 +3532,49 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- VirtualFrame::SpilledScope spilled_scope;
- LoadAndSpill(args->at(0));
- switch (op) {
- case SIN:
- frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+
+ frame_->CallRuntime(Runtime::kStringAdd, 2);
frame_->EmitPush(r0);
}
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ frame_->CallRuntime(Runtime::kSubString, 3);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
- frame_->CallRuntime(Runtime::kStringAdd, 2);
+ frame_->CallRuntime(Runtime::kStringCompare, 2);
+ frame_->EmitPush(r0);
+}
+
+
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(4, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+
+ frame_->CallRuntime(Runtime::kRegExpExec, 4);
frame_->EmitPush(r0);
}
@@ -3713,7 +3722,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
frame_->CallStub(&stub, 0);
break;
}
@@ -4343,83 +4352,7 @@ void Reference::SetValue(InitState init_state) {
case SLOT: {
Comment cmnt(masm, "[ Store to Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
- ASSERT(slot != NULL);
- if (slot->type() == Slot::LOOKUP) {
- ASSERT(slot->var()->is_dynamic());
-
- // For now, just do a runtime call.
- frame->EmitPush(cp);
- __ mov(r0, Operand(slot->var()->name()));
- frame->EmitPush(r0);
-
- if (init_state == CONST_INIT) {
- // Same as the case for a normal store, but ignores attribute
- // (e.g. READ_ONLY) of context slot so that we can initialize
- // const properties (introduced via eval("const foo = (some
- // expr);")). Also, uses the current function context instead of
- // the top context.
- //
- // Note that we must declare the foo upon entry of eval(), via a
- // context slot declaration, but we cannot initialize it at the
- // same time, because the const declaration may be at the end of
- // the eval code (sigh...) and the const variable may have been
- // used before (where its value is 'undefined'). Thus, we can only
- // do the initialization when we actually encounter the expression
- // and when the expression operands are defined and valid, and
- // thus we need the split into 2 operations: declaration of the
- // context slot followed by initialization.
- frame->CallRuntime(Runtime::kInitializeConstContextSlot, 3);
- } else {
- frame->CallRuntime(Runtime::kStoreContextSlot, 3);
- }
- // Storing a variable must keep the (new) value on the expression
- // stack. This is necessary for compiling assignment expressions.
- frame->EmitPush(r0);
-
- } else {
- ASSERT(!slot->var()->is_dynamic());
-
- JumpTarget exit;
- if (init_state == CONST_INIT) {
- ASSERT(slot->var()->mode() == Variable::CONST);
- // Only the first const initialization must be executed (the slot
- // still contains 'the hole' value). When the assignment is
- // executed, the code is identical to a normal store (see below).
- Comment cmnt(masm, "[ Init const");
- __ ldr(r2, cgen_->SlotOperand(slot, r2));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(r2, ip);
- exit.Branch(ne);
- }
-
- // We must execute the store. Storing a variable must keep the
- // (new) value on the stack. This is necessary for compiling
- // assignment expressions.
- //
- // Note: We will reach here even with slot->var()->mode() ==
- // Variable::CONST because of const declarations which will
- // initialize consts to 'the hole' value and by doing so, end up
- // calling this code. r2 may be loaded with context; used below in
- // RecordWrite.
- frame->EmitPop(r0);
- __ str(r0, cgen_->SlotOperand(slot, r2));
- frame->EmitPush(r0);
- if (slot->type() == Slot::CONTEXT) {
- // Skip write barrier if the written value is a smi.
- __ tst(r0, Operand(kSmiTagMask));
- exit.Branch(eq);
- // r2 is loaded with context when calling SlotOperand above.
- int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ mov(r3, Operand(offset));
- __ RecordWrite(r2, r3, r1);
- }
- // If we definitely did not jump over the assignment, we do not need
- // to bind the exit label. Doing so can defeat peephole
- // optimization.
- if (init_state == CONST_INIT || slot->type() == Slot::CONTEXT) {
- exit.Bind();
- }
- }
+ cgen_->StoreToSlot(slot, init_state);
break;
}
@@ -4466,6 +4399,103 @@ void Reference::SetValue(InitState init_state) {
}
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Clone the boilerplate in new space. Set the context to the
+ // current context in cp.
+ Label gc;
+
+ // Pop the boilerplate function from the stack.
+ __ pop(r3);
+
+ // Attempt to allocate new JSFunction in new space.
+ __ AllocateInNewSpace(JSFunction::kSize / kPointerSize,
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ ldr(r2, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalContextOffset));
+ __ ldr(r2, MemOperand(r2, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+
+ // Clone the rest of the boilerplate fields. We don't have to update
+ // the write barrier because the allocated object is in new space.
+ for (int offset = kPointerSize;
+ offset < JSFunction::kSize;
+ offset += kPointerSize) {
+ if (offset == JSFunction::kContextOffset) {
+ __ str(cp, FieldMemOperand(r0, offset));
+ } else {
+ __ ldr(r1, FieldMemOperand(r3, offset));
+ __ str(r1, FieldMemOperand(r0, offset));
+ }
+ }
+
+ // Return result. The argument boilerplate has been popped already.
+ __ Ret();
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ push(cp);
+ __ push(r3);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+
+ // Attempt to allocate the context in new space.
+ __ AllocateInNewSpace(length + (FixedArray::kHeaderSize / kPointerSize),
+ r0,
+ r1,
+ r2,
+ &gc,
+ TAG_OBJECT);
+
+ // Load the function from the stack.
+ __ ldr(r3, MemOperand(sp, 0 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(r2, Heap::kContextMapRootIndex);
+ __ str(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ mov(r2, Operand(length));
+ __ str(r2, FieldMemOperand(r0, Array::kLengthOffset));
+
+ // Setup the fixed slots.
+ __ mov(r1, Operand(Smi::FromInt(0)));
+ __ str(r3, MemOperand(r0, Context::SlotOffset(Context::CLOSURE_INDEX)));
+ __ str(r0, MemOperand(r0, Context::SlotOffset(Context::FCONTEXT_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::EXTENSION_INDEX)));
+
+ // Copy the global object from the surrounding context.
+ __ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ str(r1, MemOperand(r0, Context::SlotOffset(Context::GLOBAL_INDEX)));
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ str(r1, MemOperand(r0, Context::SlotOffset(i)));
+ }
+
+ // Remove the on-stack argument and return.
+ __ mov(cp, r0);
+ __ pop();
+ __ Ret();
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+}
+
+
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer for 0
// (31 instead of 32).
@@ -4692,47 +4722,55 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc) {
+ Condition cc,
+ bool never_nan_nan) {
Label not_identical;
+ Label heap_number, return_equal;
+ Register exp_mask_reg = r5;
__ cmp(r0, Operand(r1));
__ b(ne, &not_identical);
- Register exp_mask_reg = r5;
- __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
- // so we do the second best thing - test it ourselves.
- Label heap_number, return_equal;
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == lt || cc == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ mov(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == lt || cc == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
__ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but (undefined <= undefined)
- // == false! See ECMAScript 11.8.5.
- if (cc == le || cc == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, Operand(r2));
- __ b(ne, &return_equal);
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // undefined <= undefined should fail.
- } else {
- __ mov(r0, Operand(LESS)); // undefined >= undefined should fail.
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+ __ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == le || cc == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, Operand(r2));
+ __ b(ne, &return_equal);
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
+ }
+ __ mov(pc, Operand(lr)); // Return.
}
- __ mov(pc, Operand(lr)); // Return.
}
}
}
+
__ bind(&return_equal);
if (cc == lt) {
__ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
@@ -4743,43 +4781,45 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ mov(pc, Operand(lr)); // Return.
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ and_(r3, r2, Operand(exp_mask_reg));
- __ cmp(r3, Operand(exp_mask_reg));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
- // (it's a NaN). For <= and >= we need to load r0 with the failing value
- // if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
- if (cc == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ if (cc != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ and_(r3, r2, Operand(exp_mask_reg));
+ __ cmp(r3, Operand(exp_mask_reg));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if not
+ // (it's a NaN). For <= and >= we need to load r0 with the failing value
+ // if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ mov(pc, Operand(lr), LeaveCC, eq); // Return equal
+ if (cc == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
+ __ mov(pc, Operand(lr)); // Return.
}
- __ mov(pc, Operand(lr)); // Return.
+ // No fall through here.
}
- // No fall through here.
__ bind(&not_identical);
}
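
Note: the new never_nan_nan path exists because identity alone does not
imply equality in JavaScript: a heap number holding NaN compares unequal
to itself. A stand-alone illustration of the IEEE 754 rule the stub must
honour:

    #include <cassert>
    #include <limits>

    int main() {
      double nan = std::numeric_limits<double>::quiet_NaN();
      assert(!(nan == nan));  // identical values, yet not equal
      assert(!(nan < nan));   // but x < x is false regardless, which is
                              // why the lt/gt cases skip the NaN check
      return 0;
    }
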
@@ -4979,6 +5019,14 @@ static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm) {
// Check for oddballs: true, false, null, undefined.
__ cmp(r3, Operand(ODDBALL_TYPE));
__ b(eq, &return_not_equal);
+
+ // Now that we have the types we might as well check for symbol-symbol.
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ and_(r2, r2, Operand(r3));
+ __ tst(r2, Operand(kIsSymbolMask));
+ __ b(ne, &return_not_equal);
}
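
Note: the two ASSERTs guarantee that only strings can carry the symbol
bit, so a single AND over the two instance-type bytes answers "are both
operands symbols?". Two distinct symbols can never be equal, hence the
early "not equal" exit. A sketch of the test (kIsSymbolMask stands in for
the V8-internal constant):

    #include <stdint.h>

    // Both operands are symbols exactly when the symbol bit survives an
    // AND of their instance types (given kSymbolTag != 0 and no
    // non-string type carrying the bit).
    bool BothSymbols(uint8_t type0, uint8_t type1, uint8_t kIsSymbolMask) {
      return ((type0 & type1) & kIsSymbolMask) != 0;
    }
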
@@ -5005,12 +5053,13 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Fast negative check for symbol-to-symbol equality.
static void EmitCheckForSymbols(MacroAssembler* masm, Label* slow) {
// r2 is object type of r0.
- __ tst(r2, Operand(kIsNotStringMask));
- __ b(ne, slow);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
__ tst(r2, Operand(kIsSymbolMask));
__ b(eq, slow);
- __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
- __ b(ge, slow);
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ ldrb(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
__ tst(r3, Operand(kIsSymbolMask));
__ b(eq, slow);
@@ -5032,7 +5081,7 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc_);
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
@@ -5096,19 +5145,19 @@ void CompareStub::Generate(MacroAssembler* masm) {
&slow);
__ bind(&check_for_symbols);
- if (cc_ == eq) {
+ // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+ // symbols.
+ if (cc_ == eq && !strict_) {
// Either jumps to slow or returns the answer. Assumes that r2 is the type
// of r0 on entry.
EmitCheckForSymbols(masm, &slow);
}
__ bind(&slow);
- __ push(lr);
__ push(r1);
__ push(r0);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- int arg_count = 1; // Not counting receiver.
if (cc_ == eq) {
native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
@@ -5120,16 +5169,13 @@ void CompareStub::Generate(MacroAssembler* masm) {
ASSERT(cc_ == gt || cc_ == ge); // remaining cases
ncr = LESS;
}
- arg_count++;
__ mov(r0, Operand(Smi::FromInt(ncr)));
__ push(r0);
}
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
- __ InvokeBuiltin(native, CALL_JS);
- __ cmp(r0, Operand(0));
- __ pop(pc);
+ __ InvokeBuiltin(native, JUMP_JS);
}
@@ -5955,7 +6001,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
}
-void UnarySubStub::Generate(MacroAssembler* masm) {
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ ASSERT(op_ == Token::SUB);
+
Label undo;
Label slow;
Label not_smi;
@@ -6579,10 +6627,53 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
}
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case lt: return "CompareStub_LT";
+ case gt: return "CompareStub_GT";
+ case le: return "CompareStub_LE";
+ case ge: return "CompareStub_GE";
+ case ne: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case eq: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
+}
+
+
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) >> 28 < (1 << 15));
- return (static_cast<unsigned>(cc_) >> 27) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT((static_cast<unsigned>(cc_) >> 26) < (1 << 16));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != eq) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) >> 26) | nnn_value | (strict_ ? 1 : 0);
}
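
Note: the key now packs three parameters instead of two. An equivalent
stand-alone sketch of the layout (bit 0: strict_, bit 1: never_nan_nan_,
upper bits: the condition code shifted down by 26):

    // Mirrors CompareStub::MinorKey above; never_nan_nan_ is only encoded
    // for eq so that other conditions do not produce duplicate stubs.
    int MinorKeySketch(unsigned cc, bool is_eq, bool strict,
                       bool never_nan_nan) {
      int nnn_value = (is_eq && never_nan_nan) ? 2 : 0;
      return static_cast<int>(cc >> 26) | nnn_value | (strict ? 1 : 0);
    }
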
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index e9f11e9c6..b62bc36d7 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -272,6 +272,9 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
+ // Store the value on top of the stack to a slot.
+ void StoreToSlot(Slot* slot, InitState init_state);
+
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
Register tmp,
@@ -360,15 +363,18 @@ class CodeGenerator: public AstVisitor {
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
diff --git a/deps/v8/src/arm/cpu-arm.cc b/deps/v8/src/arm/cpu-arm.cc
index a5a358b3a..4e39cdaff 100644
--- a/deps/v8/src/arm/cpu-arm.cc
+++ b/deps/v8/src/arm/cpu-arm.cc
@@ -61,28 +61,32 @@ void CPU::FlushICache(void* start, size_t size) {
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
#ifdef __ARM_EABI__
- register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
+ register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"swi 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
+ // r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
- "1: swi 0x0 \n\t"
+ "1: push {r7} \n\t"
+ "mov r7, %4 \n\t"
+ "swi 0x0 \n\t"
+ "pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
- : "0" (beg), "r" (end), "r" (flg), "r" (scno)
+ : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
#else
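
Note: the push/pop of r7 is needed because the EABI passes the syscall
number in r7, which Thumb code also reserves as a frame register. For
comparison, a hypothetical alternative (not what the patch uses) is
glibc's syscall() wrapper, which saves and restores r7 internally:

    #include <stddef.h>
    #include <stdint.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    // Hypothetical replacement for the inline assembly above; assumes an
    // ARM Linux target where <asm/unistd.h> defines __ARM_NR_cacheflush.
    void FlushICacheViaSyscall(void* start, size_t size) {
    #ifdef __ARM_NR_cacheflush
      syscall(__ARM_NR_cacheflush,
              reinterpret_cast<uintptr_t>(start),
              reinterpret_cast<uintptr_t>(start) + size,
              0 /* flags */);
    #endif
    }
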
diff --git a/deps/v8/src/arm/fast-codegen-arm.cc b/deps/v8/src/arm/fast-codegen-arm.cc
index 55d87b7c2..0d934b5ab 100644
--- a/deps/v8/src/arm/fast-codegen-arm.cc
+++ b/deps/v8/src/arm/fast-codegen-arm.cc
@@ -214,178 +214,112 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context,
+ Slot* slot,
+ Register scratch) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
case Expression::kValue:
- __ push(source);
- break;
case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ pop();
- __ jmp(false_label_);
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ Move(scratch, slot);
+ Apply(context, scratch);
break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ pop();
- __ jmp(true_label_);
- }
}
}
-template <>
-MemOperand FastCodeGenerator::CreateSlotOperand<MemOperand>(
- Slot* source,
- Register scratch) {
- switch (source->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return MemOperand(fp, SlotOffset(source));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
- UNREACHABLE();
- return MemOperand(r0, 0); // Dead code to make the compiler happy.
- }
-}
-
-
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- // Use dst as scratch.
- MemOperand location = CreateSlotOperand<MemOperand>(source, dst);
- __ ldr(dst, location);
-}
-
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
- case Expression::kValue: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kValueTest:
case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
+ __ mov(ip, Operand(lit->handle()));
+ Apply(context, ip);
break;
}
}
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ __ Drop(1);
break;
- case Expression::kValue: // Fall through.
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
- case Expression::kTestValue:
- __ mov(ip, Operand(expr->handle()));
- Move(context, ip);
+ case Expression::kValue:
break;
- }
-}
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ str(src, MemOperand(fp, SlotOffset(dst)));
+ case Expression::kTest:
+ __ pop(r0);
+ TestAndBranch(r0, true_label_, false_label_);
break;
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- int index = Context::SlotOffset(dst->index());
- __ mov(scratch2, Operand(index));
- __ str(src, MemOperand(scratch1, index));
- __ RecordWrite(scratch1, scratch2, src);
+ case Expression::kValueTest: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp, 0));
+ TestAndBranch(r0, true_label_, &discard);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
break;
}
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ case Expression::kTestValue: {
+ Label discard;
+ __ ldr(r0, MemOperand(sp, 0));
+ TestAndBranch(r0, &discard, false_label_);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ }
}
}
-
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int drop_count) {
- ASSERT(drop_count > 0);
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(sp));
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- __ add(sp, sp, Operand(drop_count * kPointerSize));
+ __ Drop(count);
break;
case Expression::kValue:
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
break;
case Expression::kTest:
- ASSERT(!source.is(sp));
- __ add(sp, sp, Operand(drop_count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
+ __ Drop(count);
+ TestAndBranch(reg, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
- TestAndBranch(source, true_label_, &discard);
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ TestAndBranch(reg, true_label_, &discard);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
- if (drop_count > 1) {
- __ add(sp, sp, Operand((drop_count - 1) * kPointerSize));
- }
- __ str(source, MemOperand(sp));
- TestAndBranch(source, &discard, false_label_);
+ if (count > 1) __ Drop(count - 1);
+ __ str(reg, MemOperand(sp));
+ TestAndBranch(reg, &discard, false_label_);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -393,6 +327,50 @@ void FastCodeGenerator::DropAndMove(Expression::Context context,
}
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return MemOperand(fp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return MemOperand(r0, 0);
+}
+
+
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ // Use destination as scratch.
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ ldr(destination, location);
+}
+
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ str(src, location);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ __ mov(scratch2, Operand(Context::SlotOffset(dst->index())));
+ __ RecordWrite(scratch1, scratch2, src);
+ }
+}
+
+
+
void FastCodeGenerator::TestAndBranch(Register source,
Label* true_label,
Label* false_label) {
@@ -418,19 +396,22 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(ip, MemOperand(fp, SlotOffset(slot)));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
__ pop(ip);
- __ str(ip, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(ip, MemOperand(fp, SlotOffset(slot)));
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -504,7 +485,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
// Value in r0 is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ add(sp, sp, Operand(2 * kPointerSize));
+ __ Drop(2);
}
}
}
@@ -535,7 +516,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ mov(r0, Operand(boilerplate));
__ stm(db_w, sp, cp.bit() | r0.bit());
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
}
@@ -558,13 +539,13 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
__ mov(r2, Operand(var->name()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET_CONTEXT);
- DropAndMove(context, r0);
+ DropAndApply(1, context, r0);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
Comment cmnt(masm_, "Stack slot");
break;
}
@@ -575,21 +556,20 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(context, slot, r0);
+ Apply(context, slot, r0);
} else {
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
+ // Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
Slot* object_slot = object_var->slot();
@@ -598,7 +578,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Load the object.
Move(r2, object_slot);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
@@ -609,12 +589,12 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Push both as arguments to ic.
__ stm(db_w, sp, r2.bit() | r1.bit());
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(context, r0, 2);
+ DropAndApply(2, context, r0);
}
}
@@ -642,7 +622,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ stm(db_w, sp, r4.bit() | r3.bit() | r2.bit() | r1.bit());
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
__ bind(&done);
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
}
@@ -704,7 +684,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ ldr(r0, MemOperand(sp)); // Restore result into r0.
break;
- case ObjectLiteral::Property::GETTER: // Fall through.
+ case ObjectLiteral::Property::GETTER:
case ObjectLiteral::Property::SETTER:
__ push(r0);
Visit(key);
@@ -724,7 +704,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ pop();
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(r0);
@@ -738,7 +718,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(r0);
TestAndBranch(r0, true_label_, &discard);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -747,7 +727,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(r0);
TestAndBranch(r0, &discard, false_label_);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -760,7 +740,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(expr->literals()));
+ __ mov(r1, Operand(expr->constant_elements()));
__ stm(db_w, sp, r3.bit() | r2.bit() | r1.bit());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
@@ -806,7 +786,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ pop();
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(r0);
@@ -820,7 +800,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(r0);
TestAndBranch(r0, true_label_, &discard);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -829,7 +809,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(r0);
TestAndBranch(r0, &discard, false_label_);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -839,18 +819,21 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
+ SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(r2, Operand(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, r0);
+ Apply(context, r0);
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
+ Expression::Context context) {
+ SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, r0);
+ Apply(context, r0);
}
@@ -861,12 +844,12 @@ void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
GenericBinaryOpStub stub(op,
NO_OVERWRITE);
__ CallStub(&stub);
- Move(context, r0);
+ Apply(context, r0);
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
@@ -880,50 +863,50 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, context, r0);
- } else if (var->slot()) {
+ } else if (var->slot() != NULL) {
Slot* slot = var->slot();
- ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER: {
- switch (expr->context()) {
+ MemOperand target = MemOperand(fp, SlotOffset(slot));
+ switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
// Perform assignment and discard value.
__ pop(r0);
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(r0, target);
break;
case Expression::kValue:
// Perform assignment and preserve value.
__ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(r0, target);
break;
case Expression::kTest:
// Perform assignment and test (and discard) value.
__ pop(r0);
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(r0, target);
TestAndBranch(r0, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
__ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(r0, target);
TestAndBranch(r0, true_label_, &discard);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
__ ldr(r0, MemOperand(sp));
- __ str(r0, MemOperand(fp, SlotOffset(var->slot())));
+ __ str(r0, target);
TestAndBranch(r0, &discard, false_label_);
__ bind(&discard);
- __ pop();
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -932,31 +915,15 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
}
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the chain of contexts to the context containing the slot.
- __ ldr(r0, CodeGenerator::ContextOperand(cp, Context::CLOSURE_INDEX));
- // Load the function context (which is the incoming, outer context).
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ ldr(r0,
- CodeGenerator::ContextOperand(r0, Context::CLOSURE_INDEX));
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ mov(r0, cp);
- }
- // The context may be an intermediate context, not a function context.
- __ ldr(r0, CodeGenerator::ContextOperand(r0, Context::FCONTEXT_INDEX));
- __ pop(r1);
- __ str(r1, CodeGenerator::ContextOperand(r0, slot->index()));
+ MemOperand target = EmitSlotSearch(slot, r1);
+ __ pop(r0);
+ __ str(r0, target);
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(r1);
- } else if (expr->context() != Expression::kEffect) {
- __ mov(r3, r1);
+ if (context == Expression::kValue) {
+ __ push(r0);
+ } else if (context != Expression::kEffect) {
+ __ mov(r3, r0);
}
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
@@ -964,15 +931,14 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- // register. Skip the write barrier if the value written (r1) is a smi.
+ // register. Skip the write barrier if the value written (r0) is a smi.
// The smi test is part of RecordWrite on other platforms, not on arm.
Label exit;
- __ tst(r1, Operand(kSmiTagMask));
+ __ tst(r0, Operand(kSmiTagMask));
__ b(eq, &exit);
__ mov(r2, Operand(offset));
- __ RecordWrite(r0, r2, r1);
+ __ RecordWrite(r1, r2, r0);
__ bind(&exit);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), r3);
+ if (context != Expression::kEffect && context != Expression::kValue) {
+ Apply(context, r3);
}
break;
}
@@ -981,6 +947,10 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -1014,7 +984,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(r0);
}
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, expr->context(), r0);
}
@@ -1025,7 +995,7 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// change to slow case to avoid the quadratic behavior of repeatedly
// adding fast properties.
if (expr->starts_initialization_block()) {
- // Reciever is under the key and value.
+ // Receiver is under the key and value.
__ ldr(ip, MemOperand(sp, 2 * kPointerSize));
__ push(ip);
__ CallRuntime(Runtime::kToSlowProperties, 1);
@@ -1046,15 +1016,13 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Receiver and key are still on stack.
- __ add(sp, sp, Operand(2 * kPointerSize));
- Move(expr->context(), r0);
+ DropAndApply(2, expr->context(), r0);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
// Record the source position for the property load.
SetSourcePosition(expr->position());
@@ -1062,22 +1030,21 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
// Evaluate receiver.
Visit(expr->obj());
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in r2 and the receiver on the stack.
+ if (key->IsPropertyName()) {
+ // Do a named property load. The IC expects the property name in r2 and
+ // the receiver on the stack.
__ mov(r2, Operand(key->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
+ DropAndApply(1, expr->context(), r0);
} else {
- // Do a KEYED property load.
+ // Do a keyed property load.
Visit(expr->key());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Drop key and receiver left on the stack by IC.
- __ pop();
+ DropAndApply(2, expr->context(), r0);
}
- DropAndMove(expr->context(), r0);
}
void FastCodeGenerator::EmitCallWithIC(Call* expr,
@@ -1099,7 +1066,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, expr->context(), r0);
}
@@ -1117,7 +1084,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, expr->context(), r0);
}
@@ -1162,6 +1129,7 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// Load receiver object into r1.
if (prop->is_synthetic()) {
__ ldr(r1, CodeGenerator::GlobalObject());
+ __ ldr(r1, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
} else {
__ ldr(r1, MemOperand(sp, kPointerSize));
}
@@ -1219,14 +1187,14 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
// Load function, arg_count into r1 and r0.
__ mov(r0, Operand(arg_count));
- // Function is in esp[arg_count + 1].
+ // Function is in sp[arg_count + 1].
__ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
Handle<Code> construct_builtin(Builtins::builtin(Builtins::JSConstructCall));
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in r0, or pop it.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, expr->context(), r0);
}
@@ -1257,11 +1225,11 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Restore context register.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), r0);
+ DropAndApply(1, expr->context(), r0);
} else {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
}
}
@@ -1286,7 +1254,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Value is false so it's needed.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ push(ip);
- case Expression::kTest: // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1298,20 +1266,19 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
ASSERT_EQ(Expression::kTest, expr->expression()->context());
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ Label push_true, push_false, done;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
+ case Expression::kEffect:
+ VisitForControl(expr->expression(), &done, &done);
+ __ bind(&done);
+ break;
+
case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, &push_true);
__ bind(&push_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ push(ip);
@@ -1322,41 +1289,26 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ bind(&done);
break;
- case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
- __ bind(&done);
- break;
-
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, true_label_);
break;
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, &push_true);
__ bind(&push_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ push(ip);
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, true_label_);
__ bind(&push_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ push(ip);
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
break;
}
@@ -1390,7 +1342,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
break;
}
@@ -1402,73 +1354,127 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- Visit(proxy);
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables whose rewrite is '.arguments' are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type = (prop->key()->context() == Expression::kUninitialized)
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && expr->context() != Expression::kEffect) {
+ ASSERT(expr->context() != Expression::kUninitialized);
+ __ mov(ip, Operand(Smi::FromInt(0)));
+ __ push(ip);
+ }
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop, Expression::kValue);
+ } else {
+ Visit(prop->key());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ EmitKeyedPropertyLoad(prop, Expression::kValue);
+ }
+ }
+
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_JS);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(r0);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kTestValue:
+ case Expression::kValueTest:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver (and, for a keyed property,
+ // the key) that is currently on the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ break;
+ case NAMED_PROPERTY:
+ __ str(r0, MemOperand(sp, kPointerSize));
+ break;
+ case KEYED_PROPERTY:
+ __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ break;
+ }
+ break;
+ }
}
+
// Call runtime for +1/-1.
- __ push(r0);
- __ mov(ip, Operand(Smi::FromInt(1)));
- __ push(ip);
if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
+ __ mov(ip, Operand(Smi::FromInt(1)));
} else {
- __ CallRuntime(Runtime::kNumberSub, 2);
+ __ mov(ip, Operand(Smi::FromInt(-1)));
}
- // Call Store IC.
- __ mov(r2, Operand(proxy->AsVariable()->name()));
- __ ldr(ip, CodeGenerator::GlobalObject());
- __ push(ip);
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ Call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC.
- __ add(sp, sp, Operand(kPointerSize));
+ __ stm(db_w, sp, ip.bit() | r0.bit());
+ __ CallRuntime(Runtime::kNumberAdd, 2);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
- break;
- case Expression::kTest:
- __ pop(r0);
- TestAndBranch(r0, true_label_, false_label_);
+ // Store the value returned in r0.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(r0);
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect the result is now on top of
+ // the stack.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ expr->context());
+ }
break;
- case Expression::kValueTest: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- TestAndBranch(r0, true_label_, &discard);
- __ bind(&discard);
- __ add(sp, sp, Operand(kPointerSize));
- __ b(false_label_);
+ case NAMED_PROPERTY: {
+ __ mov(r2, Operand(prop->key()->AsLiteral()->handle()));
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(1, expr->context(), r0);
+ }
break;
}
- case Expression::kTestValue: {
- Label discard;
- __ ldr(r0, MemOperand(sp));
- TestAndBranch(r0, &discard, false_label_);
- __ bind(&discard);
- __ add(sp, sp, Operand(kPointerSize));
- __ b(true_label_);
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ Call(ic, RelocInfo::CODE_TARGET);
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(2, expr->context(), r0);
+ }
break;
}
}
@@ -1511,7 +1517,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
GenericBinaryOpStub stub(expr->op(),
NO_OVERWRITE);
__ CallStub(&stub);
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
break;
}
@@ -1528,47 +1534,41 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label push_true, push_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &push_true;
+ if_false = &push_false;
break;
-
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &push_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &push_false;
break;
}
- // Convert current context to test context: End pre-test code.
switch (expr->op()) {
case Token::IN: {
__ InvokeBuiltin(Builtins::IN, CALL_JS);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ cmp(r0, ip);
- __ b(eq, true_label_);
- __ jmp(false_label_);
+ __ b(eq, if_true);
+ __ jmp(if_false);
break;
}
@@ -1576,8 +1576,8 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ tst(r0, r0);
- __ b(eq, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ b(eq, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
@@ -1628,24 +1628,29 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow_case);
__ cmp(r1, r0);
- __ b(cc, true_label_);
- __ jmp(false_label_);
+ __ b(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ tst(r0, r0);
- __ b(cc, true_label_);
- __ jmp(false_label_);
+ __ b(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
case Expression::kValue:
__ bind(&push_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
@@ -1657,10 +1662,6 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&done);
break;
- case Expression::kEffect:
- __ bind(&done);
- break;
-
case Expression::kTest:
break;
@@ -1668,25 +1669,22 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&push_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
__ push(ip);
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
__ bind(&push_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ push(ip);
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), r0);
+ Apply(expr->context(), r0);
}
@@ -1735,12 +1733,6 @@ void FastCodeGenerator::ExitFinallyBlock() {
}
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index b57aa9396..a1f26130a 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -618,6 +618,15 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ---------- S t a t e --------------
+ // -- lr : return address
+ // -- sp[0] : key
+ // -- sp[4] : receiver
+ GenerateGeneric(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// TODO(476): port specialized code.
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 876eec109..18cadaca3 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -162,9 +162,9 @@ void MacroAssembler::StackLimitCheck(Label* on_stack_overflow) {
}
-void MacroAssembler::Drop(int stack_elements, Condition cond) {
- if (stack_elements > 0) {
- add(sp, sp, Operand(stack_elements * kPointerSize), LeaveCC, cond);
+void MacroAssembler::Drop(int count, Condition cond) {
+ if (count > 0) {
+ add(sp, sp, Operand(count * kPointerSize), LeaveCC, cond);
}
}
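Drop(count, cond) folds the recurring `add(sp, sp, Operand(count * kPointerSize))` pattern used throughout the fast code generator into one place. A hypothetical sketch (not from this patch) of what a few calls emit, assuming 32-bit ARM where kPointerSize is 4:

    __ Drop(2);      // add sp, sp, #8
    __ Drop(1, eq);  // add sp, sp, #4, predicated on the eq condition
    __ Drop(0);      // emits nothing; the count > 0 guard skips the add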
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 88bfa9ce0..8f2064a74 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -64,7 +64,11 @@ class MacroAssembler: public Assembler {
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(Condition cond = al);
- void Drop(int stack_elements, Condition cond = al);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the sp register.
+ void Drop(int count, Condition cond = al);
+
void Call(Label* target);
void Move(Register dst, Handle<Object> value);
// Jumps to the label at the index given by the Smi in "index".
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index 24b6a9c81..5ea775104 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -59,15 +59,19 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
+ * - direct_call (if 1, direct call from JavaScript code, if 0 call
+ * through the runtime system)
* - stack_area_base (High end of the memory area to use as
* backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
* --- sp when called ---
* - link address
* - backup of registers r4..r11
- * - int* capture_array (int[num_saved_registers_], for output).
* - end of input (Address of end of string)
* - start of input (Address of first character in string)
+ * - start index (character index of start)
* --- frame pointer ----
* - void* input_string (location of a handle containing the string)
* - Offset of location before start of input (effectively character
@@ -85,11 +89,13 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
* The call is performed by NativeRegExpMacroAssembler::Execute()
* (in regexp-macro-assembler.cc).
*/
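Spelled out as a sketch, the eight-argument entry signature described in the comment above looks like this (the typedef name is illustrative; the parameter list is copied from the comment):

    typedef int (*RegExpEntrySketch)(String* input_string,
                                     int start_index,
                                     Address start,
                                     Address end,
                                     int* capture_output_array,
                                     bool at_start,
                                     byte* stack_area_base,
                                     bool direct_call);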
@@ -459,8 +465,6 @@ void RegExpMacroAssemblerARM::CheckNotCharacterAfterMinusAnd(
bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@@ -469,11 +473,6 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmp(current_character(), Operand(' '));
__ b(eq, &success);
@@ -487,11 +486,6 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), Operand(' '));
@@ -504,33 +498,18 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ sub(r0, current_character(), Operand('0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(hi, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ sub(r0, current_character(), Operand('0'));
__ cmp(r0, Operand('9' - '0'));
BranchOrBacktrack(ls, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
__ eor(r0, current_character(), Operand(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
__ sub(r0, r0, Operand(0x0b));
@@ -546,13 +525,71 @@ bool RegExpMacroAssemblerARM::CheckSpecialCharacterClass(uc16 type,
}
return true;
}
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ eor(r0, current_character(), Operand(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(r0, r0, Operand(0x0b));
+ __ cmp(r0, Operand(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(hi, on_no_match);
+ } else {
+ Label done;
+ __ b(ls, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed ((current_char ^ 0x01) - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(r0, r0, Operand(0x2028 - 0x0b));
+ __ cmp(r0, Operand(1));
+ BranchOrBacktrack(hi, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
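+ // (Illustrative aside, not part of the patch: in C++ terms the ASCII
+ // half of this check is
+ //   unsigned t = (c ^ 0x01) - 0x0b;  bool nl = t <= 0x0c - 0x0b;
+ // which maps '\n' (0x0a) to 0 and '\r' (0x0d) to 1, so a single
+ // unsigned range check covers both newline characters.)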
+ case 'w': {
+ // Match word character (0-9, A-Z, a-z and _).
+ Label digits, done;
+ __ cmp(current_character(), Operand('9'));
+ __ b(ls, &digits);
+ __ cmp(current_character(), Operand('_'));
+ __ b(eq, &done);
+ __ orr(r0, current_character(), Operand(0x20));
+ __ sub(r0, r0, Operand('a'));
+ __ cmp(r0, Operand('z' - 'a'));
+ BranchOrBacktrack(hi, on_no_match);
+ __ jmp(&done);
+
+ __ bind(&digits);
+ __ cmp(current_character(), Operand('0'));
+ BranchOrBacktrack(lo, on_no_match);
+ __ bind(&done);
+
+ return true;
+ }
+ case 'W': {
+ // Match non-word character (not 0-9, A-Z, a-z and _).
+ Label digits, done;
+ __ cmp(current_character(), Operand('9'));
+ __ b(ls, &digits);
+ __ cmp(current_character(), Operand('_'));
+ BranchOrBacktrack(eq, on_no_match);
+ __ orr(r0, current_character(), Operand(0x20));
+ __ sub(r0, r0, Operand('a'));
+ __ cmp(r0, Operand('z' - 'a'));
+ BranchOrBacktrack(ls, on_no_match);
+ __ jmp(&done);
+
+ __ bind(&digits);
+ __ cmp(current_character(), Operand('0'));
+ BranchOrBacktrack(hs, on_no_match);
+ __ bind(&done);
+
+ return true;
+ }
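+ // (Illustrative aside, not part of the patch: the 'w' test above is,
+ // in C++ terms,
+ //   c <= '9' ? c >= '0'
+ //            : c == '_' || (unsigned)((c | 0x20) - 'a') <= 'z' - 'a';
+ // OR-ing with 0x20 folds 'A'..'Z' onto 'a'..'z', so one range check
+ // handles both letter cases.)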
case '*':
// Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
- }
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.h b/deps/v8/src/arm/regexp-macro-assembler-arm.h
index f70bc0554..4459859a3 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.h
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.h
@@ -80,8 +80,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
@@ -127,6 +125,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kReturnAddress + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index 3a4bb311b..3ce5b7a6b 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -62,9 +62,9 @@ class SimulatorStack : public v8::internal::AllStatic {
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
@@ -79,9 +79,9 @@ class SimulatorStack : public v8::internal::AllStatic {
assembler::arm::Simulator::current()->Call(FUNCTION_ADDR(entry), 5, \
p0, p1, p2, p3, p4))
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
assembler::arm::Simulator::current()->Call( \
- FUNCTION_ADDR(entry), 7, p0, p1, p2, p3, p4, p5, p6)
+ FUNCTION_ADDR(entry), 8, p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
diff --git a/deps/v8/src/arm/virtual-frame-arm.cc b/deps/v8/src/arm/virtual-frame-arm.cc
index 132c8aebc..a33ebd420 100644
--- a/deps/v8/src/arm/virtual-frame-arm.cc
+++ b/deps/v8/src/arm/virtual-frame-arm.cc
@@ -143,12 +143,25 @@ void VirtualFrame::AllocateStackSlots() {
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
- // Initialize stack slots with 'undefined' value.
+ // Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- }
- __ LoadRoot(r2, Heap::kStackLimitRootIndex);
- for (int i = 0; i < count; i++) {
- __ push(ip);
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
+ if (count < kLocalVarBound) {
+ // For less locals the unrolled loop is more compact.
+ for (int i = 0; i < count; i++) {
+ __ push(ip);
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ __ mov(r1, Operand(count));
+ __ bind(&alloc_locals_loop);
+ __ push(ip);
+ __ sub(r1, r1, Operand(1), SetCC);
+ __ b(ne, &alloc_locals_loop);
+ }
+ } else {
+ __ LoadRoot(r2, Heap::kStackLimitRootIndex);
}
// Check the stack for overflow or a break request.
// Put the lr setup instruction in the delay slot. The kInstrSize is added
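The threshold rationale, sketched (instruction counts only, assuming one 4-byte ARM instruction per line):

    // Static size of the two forms (illustrative):
    //   unrolled:  count x (push ip)                  -> count instructions
    //   loop:      mov r1, #count; push; subs; bne    -> 4 instructions
    // Crossover at count == 5, matching kLocalVarBound in the header below.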
@@ -387,6 +400,13 @@ void VirtualFrame::EmitPush(Register reg) {
}
+void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
+ ASSERT(stack_pointer_ == element_count() - 1);
+ Adjust(count);
+ __ stm(db_w, sp, src_regs);
+}
+
+
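+// (Illustrative use, not in the patch:
+//   frame_->EmitPushMultiple(2, r0.bit() | r1.bit());
+// pushes r0 and r1 with a single stm and grows the virtual frame by two
+// elements; stm(db_w) stores the highest-numbered register at the highest
+// address, which is the decreasing r15..r0 order documented in the header.)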
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/arm/virtual-frame-arm.h b/deps/v8/src/arm/virtual-frame-arm.h
index d5230007a..b2f0eea60 100644
--- a/deps/v8/src/arm/virtual-frame-arm.h
+++ b/deps/v8/src/arm/virtual-frame-arm.h
@@ -180,6 +180,9 @@ class VirtualFrame : public ZoneObject {
// shared return site. Emits code for spills.
void PrepareForReturn();
+ // Number of local variables at or above which we use a loop for allocation.
+ static const int kLocalVarBound = 5;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -346,6 +349,11 @@ class VirtualFrame : public ZoneObject {
// corresponding push instruction.
void EmitPush(Register reg);
+ // Push multiple registers on the stack and the virtual frame.
+ // Registers are selected by setting bits in src_regs and
+ // are pushed in decreasing order: r15 .. r0.
+ void EmitPushMultiple(int count, int src_regs);
+
// Push an element on the virtual frame.
void Push(Register reg);
void Push(Handle<Object> value);
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 20d884ee6..c3ab179da 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -70,19 +70,22 @@ function GetSortedArrayKeys(array, intervals) {
// Optimized for sparse arrays if separator is ''.
function SparseJoin(array, len, convert) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
- var builder = new StringBuilder();
var last_key = -1;
var keys_length = keys.length;
+
+ var elements = new $Array(keys_length);
+ var elements_length = 0;
+
for (var i = 0; i < keys_length; i++) {
var key = keys[i];
if (key != last_key) {
var e = array[key];
- if (typeof(e) !== 'string') e = convert(e);
- builder.add(e);
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
last_key = key;
}
}
- return builder.generate();
+ return %StringBuilderConcat(elements, elements_length, '');
}
@@ -107,7 +110,7 @@ function Join(array, length, separator, convert) {
// Attempt to convert the elements.
try {
- if (UseSparseVariant(array, length, is_array) && separator === '') {
+ if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
return SparseJoin(array, length, convert);
}
@@ -115,39 +118,37 @@ function Join(array, length, separator, convert) {
if (length == 1) {
var e = array[0];
if (!IS_UNDEFINED(e) || (0 in array)) {
- if (typeof(e) === 'string') return e;
+ if (IS_STRING(e)) return e;
return convert(e);
}
}
- var builder = new StringBuilder();
+ // Construct an array for the elements.
+ var elements;
+ var elements_length = 0;
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
+ elements = new $Array(length);
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e) || (i in array)) {
- if (typeof(e) !== 'string') e = convert(e);
- if (e.length > 0) {
- var elements = builder.elements;
- elements[elements.length] = e;
- }
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
}
}
} else {
+ elements = new $Array(length << 1);
for (var i = 0; i < length; i++) {
var e = array[i];
- if (i != 0) builder.add(separator);
+ if (i != 0) elements[elements_length++] = separator;
if (!IS_UNDEFINED(e) || (i in array)) {
- if (typeof(e) !== 'string') e = convert(e);
- if (e.length > 0) {
- var elements = builder.elements;
- elements[elements.length] = e;
- }
+ if (!IS_STRING(e)) e = convert(e);
+ elements[elements_length++] = e;
}
}
}
- return builder.generate();
+ return %StringBuilderConcat(elements, elements_length, '');
} finally {
// Make sure to pop the visited array no matter what happens.
if (is_array) visited_arrays.pop();
@@ -156,16 +157,15 @@ function Join(array, length, separator, convert) {
function ConvertToString(e) {
- if (typeof(e) === 'string') return e;
if (e == null) return '';
else return ToString(e);
}
function ConvertToLocaleString(e) {
- if (typeof(e) === 'string') return e;
- if (e == null) return '';
- else {
+ if (e == null) {
+ return '';
+ } else {
// e_obj's toLocaleString might be overwritten, check if it is a function.
// Call ToString if toLocaleString is not a function.
// See issue 877615.
@@ -359,16 +359,20 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
- if (IS_UNDEFINED(separator)) separator = ',';
- else separator = ToString(separator);
- return Join(this, ToUint32(this.length), separator, ConvertToString);
+ if (IS_UNDEFINED(separator)) {
+ separator = ',';
+ } else if (!IS_STRING(separator)) {
+ separator = ToString(separator);
+ }
+ var length = TO_UINT32(this.length);
+ return Join(this, length, separator, ConvertToString);
}
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
- var n = ToUint32(this.length);
+ var n = TO_UINT32(this.length);
if (n == 0) {
this.length = n;
return;
@@ -384,7 +388,7 @@ function ArrayPop() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
- var n = ToUint32(this.length);
+ var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
this[i+n] = %_Arguments(i);
@@ -452,7 +456,7 @@ function SparseReverse(array, len) {
function ArrayReverse() {
- var j = ToUint32(this.length) - 1;
+ var j = TO_UINT32(this.length) - 1;
if (UseSparseVariant(this, j, IS_ARRAY(this))) {
SparseReverse(this, j+1);
@@ -483,7 +487,7 @@ function ArrayReverse() {
function ArrayShift() {
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
if (len === 0) {
this.length = 0;
@@ -504,7 +508,7 @@ function ArrayShift() {
function ArrayUnshift(arg1) { // length == 1
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
if (IS_ARRAY(this))
@@ -523,7 +527,7 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@@ -568,7 +572,7 @@ function ArraySplice(start, delete_count) {
// compatibility.
if (num_arguments == 0) return;
- var len = ToUint32(this.length);
+ var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
if (start_i < 0) {
@@ -850,7 +854,7 @@ function ArraySort(comparefn) {
return first_undefined;
}
- length = ToUint32(this.length);
+ length = TO_UINT32(this.length);
if (length < 2) return this;
var is_array = IS_ARRAY(this);
@@ -915,7 +919,7 @@ function ArrayForEach(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@@ -933,7 +937,7 @@ function ArraySome(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
@@ -950,25 +954,23 @@ function ArrayEvery(f, receiver) {
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (!f.call(receiver, current, i, this)) return false;
}
}
-
return true;
}
-
function ArrayMap(f, receiver) {
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
- var length = this.length;
+ var length = TO_UINT32(this.length);
var result = new $Array(length);
for (var i = 0; i < length; i++) {
var current = this[i];
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 2d16250cc..a736fb13f 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -674,6 +674,19 @@ ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
+
+ExternalReference ExternalReference::address_of_static_offsets_vector() {
+ return ExternalReference(OffsetsVector::static_offsets_vector_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
+ return ExternalReference(RegExpStack::memory_address());
+}
+
+ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
+ return ExternalReference(RegExpStack::memory_size_address());
+}
+
#endif
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 87cde9bdf..e5efe89b6 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -420,6 +420,11 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
+ // Static variables for RegExp.
+ static ExternalReference address_of_static_offsets_vector();
+ static ExternalReference address_of_regexp_stack_memory_address();
+ static ExternalReference address_of_regexp_stack_memory_size();
+
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference heap_always_allocate_scope_depth();
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 90b5ed68a..4edcf6d84 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -433,7 +433,7 @@ void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
} else {
stream()->Add("%i ", that->max());
}
- stream()->Add(that->is_greedy() ? "g " : "n ");
+ stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
that->body()->Accept(this, data);
stream()->Add(")");
return NULL;
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 195fc14ed..3961cb81d 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -187,6 +187,11 @@ class Expression: public AstNode {
virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
+ // Symbols that cannot be parsed as array indices are considered property
+ // names. We do not treat symbols that can be array indices as property
+ // names because [] for string objects is handled only by keyed ICs.
+ virtual bool IsPropertyName() { return false; }
+
// Mark the expression as being compiled as an expression
// statement. This is used to transform postfix increments to
// (faster) prefix increments.
@@ -642,21 +647,20 @@ class TryStatement: public Statement {
class TryCatchStatement: public TryStatement {
public:
TryCatchStatement(Block* try_block,
- Expression* catch_var,
+ VariableProxy* catch_var,
Block* catch_block)
: TryStatement(try_block),
catch_var_(catch_var),
catch_block_(catch_block) {
- ASSERT(catch_var->AsVariableProxy() != NULL);
}
virtual void Accept(AstVisitor* v);
- Expression* catch_var() const { return catch_var_; }
+ VariableProxy* catch_var() const { return catch_var_; }
Block* catch_block() const { return catch_block_; }
private:
- Expression* catch_var_;
+ VariableProxy* catch_var_;
Block* catch_block_;
};
@@ -707,6 +711,14 @@ class Literal: public Expression {
virtual bool IsValidJSON() { return true; }
+ virtual bool IsPropertyName() {
+ if (handle_->IsSymbol()) {
+ uint32_t ignored;
+ return !String::cast(*handle_)->AsArrayIndex(&ignored);
+ }
+ return false;
+ }
+
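+ // (Illustrative expectations, not part of the patch: a literal holding
+ // the symbol "length" is a property name; "123" is not, because it
+ // parses as an array index; a numeric literal is not, because its
+ // handle is not a symbol.)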
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@@ -827,24 +839,24 @@ class RegExpLiteral: public MaterializedLiteral {
// for minimizing the work when constructing it at runtime.
class ArrayLiteral: public MaterializedLiteral {
public:
- ArrayLiteral(Handle<FixedArray> literals,
+ ArrayLiteral(Handle<FixedArray> constant_elements,
ZoneList<Expression*>* values,
int literal_index,
bool is_simple,
int depth)
: MaterializedLiteral(literal_index, is_simple, depth),
- literals_(literals),
+ constant_elements_(constant_elements),
values_(values) {}
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsValidJSON();
- Handle<FixedArray> literals() const { return literals_; }
+ Handle<FixedArray> constant_elements() const { return constant_elements_; }
ZoneList<Expression*>* values() const { return values_; }
private:
- Handle<FixedArray> literals_;
+ Handle<FixedArray> constant_elements_;
ZoneList<Expression*>* values_;
};
@@ -1526,6 +1538,7 @@ class CharacterSet BASE_EMBEDDED {
standard_set_type_ = special_set_type;
}
bool is_standard() { return standard_set_type_ != 0; }
+ void Canonicalize();
private:
ZoneList<CharacterRange>* ranges_;
// If non-zero, the value represents a standard set (e.g., all whitespace
@@ -1619,12 +1632,13 @@ class RegExpText: public RegExpTree {
class RegExpQuantifier: public RegExpTree {
public:
- RegExpQuantifier(int min, int max, bool is_greedy, RegExpTree* body)
- : min_(min),
+ enum Type { GREEDY, NON_GREEDY, POSSESSIVE };
+ RegExpQuantifier(int min, int max, Type type, RegExpTree* body)
+ : body_(body),
+ min_(min),
max_(max),
- is_greedy_(is_greedy),
- body_(body),
- min_match_(min * body->min_match()) {
+ min_match_(min * body->min_match()),
+ type_(type) {
if (max > 0 && body->max_match() > kInfinity / max) {
max_match_ = kInfinity;
} else {
@@ -1648,15 +1662,17 @@ class RegExpQuantifier: public RegExpTree {
virtual int max_match() { return max_match_; }
int min() { return min_; }
int max() { return max_; }
- bool is_greedy() { return is_greedy_; }
+ bool is_possessive() { return type_ == POSSESSIVE; }
+ bool is_non_greedy() { return type_ == NON_GREEDY; }
+ bool is_greedy() { return type_ == GREEDY; }
RegExpTree* body() { return body_; }
private:
+ RegExpTree* body_;
int min_;
int max_;
- bool is_greedy_;
- RegExpTree* body_;
int min_match_;
int max_match_;
+ Type type_;
};
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 6ae31251a..9eacf57a7 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -992,6 +992,7 @@ void Genesis::InstallNativeFunctions() {
INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
INSTALL_NATIVE(JSFunction, "ToBoolean", to_boolean_fun);
+ INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
INSTALL_NATIVE(JSFunction, "ConfigureTemplateInstance",
configure_instance_fun);
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index b66635c50..aa680d76b 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -544,6 +544,11 @@ static void Generate_KeyedLoadIC_Generic(MacroAssembler* masm) {
}
+static void Generate_KeyedLoadIC_String(MacroAssembler* masm) {
+ KeyedLoadIC::GenerateString(masm);
+}
+
+
static void Generate_KeyedLoadIC_ExternalByteArray(MacroAssembler* masm) {
KeyedLoadIC::GenerateExternalArray(masm, kExternalByteArray);
}
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index bc32c4920..f0ceab667 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -74,6 +74,7 @@ namespace internal {
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC) \
+ V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalUnsignedByteArray, KEYED_LOAD_IC, MEGAMORPHIC) \
V(KeyedLoadIC_ExternalShortArray, KEYED_LOAD_IC, MEGAMORPHIC) \
@@ -147,7 +148,8 @@ namespace internal {
V(STRING_ADD_LEFT, 1) \
V(STRING_ADD_RIGHT, 1) \
V(APPLY_PREPARE, 1) \
- V(APPLY_OVERFLOW, 1)
+ V(APPLY_OVERFLOW, 1) \
+ V(STRING_CHAR_AT, 1)
class ObjectVisitor;
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index fee92b951..052c1cafa 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -37,6 +37,8 @@ namespace internal {
V(CallFunction) \
V(GenericBinaryOp) \
V(StringAdd) \
+ V(SubString) \
+ V(StringCompare) \
V(SmiOp) \
V(Compare) \
V(RecordWrite) \
@@ -46,12 +48,13 @@ namespace internal {
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
- V(UnarySub) \
+ V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(Instanceof) \
V(CounterOp) \
V(ArgumentsAccess) \
+ V(RegExpExec) \
V(Runtime) \
V(CEntry) \
V(JSEntry)
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 26e8d7de0..fd7e0e80b 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -342,11 +342,12 @@ CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"},
{&CodeGenerator::GenerateRandomPositiveSmi, "_RandomPositiveSmi"},
- {&CodeGenerator::GenerateMathSin, "_Math_sin"},
- {&CodeGenerator::GenerateMathCos, "_Math_cos"},
{&CodeGenerator::GenerateIsObject, "_IsObject"},
{&CodeGenerator::GenerateIsFunction, "_IsFunction"},
{&CodeGenerator::GenerateStringAdd, "_StringAdd"},
+ {&CodeGenerator::GenerateSubString, "_SubString"},
+ {&CodeGenerator::GenerateStringCompare, "_StringCompare"},
+ {&CodeGenerator::GenerateRegExpExec, "_RegExpExec"},
};
@@ -450,6 +451,23 @@ const char* RuntimeStub::GetName() {
}
+const char* GenericUnaryOpStub::GetName() {
+ switch (op_) {
+ case Token::SUB:
+ return overwrite_
+ ? "GenericUnaryOpStub_SUB_Overwrite"
+ : "GenericUnaryOpStub_SUB_Alloc";
+ case Token::BIT_NOT:
+ return overwrite_
+ ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
+ : "GenericUnaryOpStub_BIT_NOT_Alloc";
+ default:
+ UNREACHABLE();
+ return "<unknown>";
+ }
+}
+
+
void RuntimeStub::Generate(MacroAssembler* masm) {
Runtime::Function* f = Runtime::FunctionForId(id_);
masm->TailCallRuntime(ExternalReference(f),
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index b3cf51ed5..2247c5c80 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -294,30 +294,53 @@ class InstanceofStub: public CodeStub {
};
-class UnarySubStub : public CodeStub {
+class GenericUnaryOpStub : public CodeStub {
public:
- explicit UnarySubStub(bool overwrite)
- : overwrite_(overwrite) { }
+ GenericUnaryOpStub(Token::Value op, bool overwrite)
+ : op_(op), overwrite_(overwrite) { }
private:
+ Token::Value op_;
bool overwrite_;
- Major MajorKey() { return UnarySub; }
- int MinorKey() { return overwrite_ ? 1 : 0; }
+
+ class OverwriteField: public BitField<int, 0, 1> {};
+ class OpField: public BitField<Token::Value, 1, kMinorBits - 1> {};
+
+ Major MajorKey() { return GenericUnaryOp; }
+ int MinorKey() {
+ return OpField::encode(op_) | OverwriteField::encode(overwrite_);
+ }
+
void Generate(MacroAssembler* masm);
- const char* GetName() { return "UnarySubStub"; }
+ const char* GetName();
+};
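Editor's note: the minor key now has to distinguish stubs along two axes, so the bits are packed with `BitField`: bit 0 carries the overwrite flag and the remaining minor-key bits carry the token. A rough, self-contained sketch of that encoding, using a simplified stand-in for V8's `BitField` template and made-up token values:

#include <cassert>

// Simplified stand-in for V8's BitField<Type, shift, size> helper.
template <class T, int shift, int size>
struct BitField {
  static int encode(T value) { return static_cast<int>(value) << shift; }
  static T decode(int field) {
    return static_cast<T>((field >> shift) & ((1 << size) - 1));
  }
};

enum Op { SUB = 0, BIT_NOT = 1 };  // illustrative token values

typedef BitField<bool, 0, 1> OverwriteField;
typedef BitField<Op, 1, 7> OpField;

int main() {
  // Same shape as GenericUnaryOpStub::MinorKey().
  int key = OpField::encode(BIT_NOT) | OverwriteField::encode(true);
  assert(OpField::decode(key) == BIT_NOT);
  assert(OverwriteField::decode(key) == true);
  return 0;
}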
+
+
+enum NaNInformation {
+ kBothCouldBeNaN,
+ kCantBothBeNaN
};
class CompareStub: public CodeStub {
public:
- CompareStub(Condition cc, bool strict) : cc_(cc), strict_(strict) { }
+ CompareStub(Condition cc,
+ bool strict,
+ NaNInformation nan_info = kBothCouldBeNaN) :
+ cc_(cc), strict_(strict), never_nan_nan_(nan_info == kCantBothBeNaN) { }
void Generate(MacroAssembler* masm);
private:
Condition cc_;
bool strict_;
+ // Only used for 'equal' comparisons. Tells the stub that we already know
+ // that at least one side of the comparison is not NaN. This allows the
+ // stub to use object identity in the positive case. We ignore it when
+ // generating the minor key for other comparisons to avoid creating more
+ // stubs.
+ bool never_nan_nan_;
Major MajorKey() { return Compare; }
@@ -329,6 +352,9 @@ class CompareStub: public CodeStub {
Register object,
Register scratch);
+ // Unfortunately you have to run without snapshots to see most of these
+ // names in the profile since most compare stubs end up in the snapshot.
+ const char* GetName();
#ifdef DEBUG
void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n",
@@ -470,6 +496,26 @@ class ArgumentsAccessStub: public CodeStub {
};
+class RegExpExecStub: public CodeStub {
+ public:
+ RegExpExecStub() { }
+
+ private:
+ Major MajorKey() { return RegExpExec; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+
+ const char* GetName() { return "RegExpExecStub"; }
+
+#ifdef DEBUG
+ void Print() {
+ PrintF("RegExpExecStub\n");
+ }
+#endif
+};
+
+
} // namespace internal
} // namespace v8
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 03771d9c4..420b809e7 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -538,7 +538,7 @@ Handle<JSFunction> Compiler::BuildBoilerplate(FunctionLiteral* literal,
LOG(CodeCreateEvent(Logger::FUNCTION_TAG, *code, *literal->name()));
#ifdef ENABLE_OPROFILE_AGENT
- OProfileAgent::CreateNativeCodeRegion(*node->name(),
+ OProfileAgent::CreateNativeCodeRegion(*literal->name(),
code->instruction_start(),
code->instruction_size());
#endif
@@ -649,12 +649,6 @@ void CodeGenSelector::VisitStatements(ZoneList<Statement*>* stmts) {
void CodeGenSelector::VisitDeclaration(Declaration* decl) {
Property* prop = decl->proxy()->AsProperty();
if (prop != NULL) {
- // Property rewrites are shared, ensure we are not changing its
- // expression context state.
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
ProcessExpression(prop->key(), Expression::kValue);
}
@@ -746,7 +740,9 @@ void CodeGenSelector::VisitForInStatement(ForInStatement* stmt) {
void CodeGenSelector::VisitTryCatchStatement(TryCatchStatement* stmt) {
- BAILOUT("TryCatchStatement");
+ Visit(stmt->try_block());
+ CHECK_BAILOUT;
+ Visit(stmt->catch_block());
}
@@ -876,7 +872,9 @@ void CodeGenSelector::VisitArrayLiteral(ArrayLiteral* expr) {
void CodeGenSelector::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- BAILOUT("CatchExtensionObject");
+ ProcessExpression(expr->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ ProcessExpression(expr->value(), Expression::kValue);
}
@@ -890,6 +888,9 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
Property* prop = expr->target()->AsProperty();
ASSERT(var == NULL || prop == NULL);
if (var != NULL) {
+ if (var->mode() == Variable::CONST) {
+ BAILOUT("Assignment to const");
+ }
// All global variables are supported.
if (!var->is_global()) {
ASSERT(var->slot() != NULL);
@@ -899,20 +900,12 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
}
}
} else if (prop != NULL) {
- ASSERT(prop->obj()->context() == Expression::kUninitialized ||
- prop->obj()->context() == Expression::kValue);
ProcessExpression(prop->obj(), Expression::kValue);
CHECK_BAILOUT;
// We will only visit the key during code generation for keyed property
// stores. Leave its expression context uninitialized for named
// property stores.
- Literal* lit = prop->key()->AsLiteral();
- uint32_t ignored;
- if (lit == NULL ||
- !lit->handle()->IsSymbol() ||
- String::cast(*(lit->handle()))->AsArrayIndex(&ignored)) {
- ASSERT(prop->key()->context() == Expression::kUninitialized ||
- prop->key()->context() == Expression::kValue);
+ if (!prop->key()->IsPropertyName()) {
ProcessExpression(prop->key(), Expression::kValue);
CHECK_BAILOUT;
}
@@ -926,7 +919,7 @@ void CodeGenSelector::VisitAssignment(Assignment* expr) {
void CodeGenSelector::VisitThrow(Throw* expr) {
- BAILOUT("Throw");
+ ProcessExpression(expr->exception(), Expression::kValue);
}
@@ -1018,11 +1011,32 @@ void CodeGenSelector::VisitUnaryOperation(UnaryOperation* expr) {
void CodeGenSelector::VisitCountOperation(CountOperation* expr) {
- // We support postfix count operations on global variables.
- if (expr->is_prefix()) BAILOUT("Prefix CountOperation");
Variable* var = expr->expression()->AsVariableProxy()->AsVariable();
- if (var == NULL || !var->is_global()) BAILOUT("non-global postincrement");
- ProcessExpression(expr->expression(), Expression::kValue);
+ Property* prop = expr->expression()->AsProperty();
+ ASSERT(var == NULL || prop == NULL);
+ if (var != NULL) {
+ // All global variables are supported.
+ if (!var->is_global()) {
+ ASSERT(var->slot() != NULL);
+ Slot::Type type = var->slot()->type();
+ if (type == Slot::LOOKUP) {
+ BAILOUT("CountOperation with lookup slot");
+ }
+ }
+ } else if (prop != NULL) {
+ ProcessExpression(prop->obj(), Expression::kValue);
+ CHECK_BAILOUT;
+ // We will only visit the key during code generation for keyed property
+ // stores. Leave its expression context uninitialized for named
+ // property stores.
+ if (!prop->key()->IsPropertyName()) {
+ ProcessExpression(prop->key(), Expression::kValue);
+ CHECK_BAILOUT;
+ }
+ } else {
+ // This is a throw reference error.
+ BAILOUT("CountOperation non-variable/non-property expression");
+ }
}
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index bdfc40b04..66c157595 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -77,6 +77,7 @@ enum ContextLookupFlags {
V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
V(TO_BOOLEAN_FUN_INDEX, JSFunction, to_boolean_fun) \
+ V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
@@ -202,6 +203,7 @@ class Context: public FixedArray {
TO_UINT32_FUN_INDEX,
TO_INT32_FUN_INDEX,
TO_BOOLEAN_FUN_INDEX,
+ GLOBAL_EVAL_FUN_INDEX,
INSTANTIATE_FUN_INDEX,
CONFIGURE_INSTANCE_FUN_INDEX,
SPECIAL_FUNCTION_TABLE_INDEX,
diff --git a/deps/v8/src/date-delay.js b/deps/v8/src/date-delay.js
index 0778dc99c..7d8f45888 100644
--- a/deps/v8/src/date-delay.js
+++ b/deps/v8/src/date-delay.js
@@ -45,12 +45,6 @@ function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
}
-// ECMA 262 - 15.9.1.2
-function Day(time) {
- return FLOOR(time / msPerDay);
-}
-
-
// ECMA 262 - 5.2
function Modulo(value, remainder) {
var mod = value % remainder;
@@ -86,30 +80,13 @@ function TimeFromYear(year) {
}
-function YearFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).year;
-}
-
-
function InLeapYear(time) {
- return DaysInYear(YearFromTime(time)) == 366 ? 1 : 0;
-}
-
-
-// ECMA 262 - 15.9.1.4
-function MonthFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).month;
+ return DaysInYear(YEAR_FROM_TIME(time)) == 366 ? 1 : 0;
}
function DayWithinYear(time) {
- return Day(time) - DayFromYear(YearFromTime(time));
-}
-
-
-// ECMA 262 - 15.9.1.5
-function DateFromTime(time) {
- return FromJulianDay(Day(time) + kDayZeroInJulianDay).date;
+ return DAY(time) - DayFromYear(YEAR_FROM_TIME(time));
}
@@ -136,7 +113,7 @@ function EquivalentTime(t) {
// we must do this, but for compatibility with other browsers, we use
// the actual year if it is in the range 1970..2037
if (t >= 0 && t <= 2.1e12) return t;
- var day = MakeDay(EquivalentYear(YearFromTime(t)), MonthFromTime(t), DateFromTime(t));
+ var day = MakeDay(EquivalentYear(YEAR_FROM_TIME(t)), MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return TimeClip(MakeDate(day, TimeWithinDay(t)));
}
@@ -232,7 +209,7 @@ function LocalTimezone(t) {
function WeekDay(time) {
- return Modulo(Day(time) + 4, 7);
+ return Modulo(DAY(time) + 4, 7);
}
var local_time_offset = %DateLocalTimeOffset();
@@ -243,7 +220,14 @@ function LocalTime(time) {
}
function LocalTimeNoCheck(time) {
- return time + local_time_offset + DaylightSavingsOffset(time);
+ // Inline the DST offset cache checks for speed.
+ var cache = DST_offset_cache;
+ if (cache.start <= time && time <= cache.end) {
+ var dst_offset = cache.offset;
+ } else {
+ var dst_offset = DaylightSavingsOffset(time);
+ }
+ return time + local_time_offset + dst_offset;
}
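Editor's note: the fast path relies on `DST_offset_cache` describing one interval [start, end] on which the DST offset is known to be constant; only times outside that window pay for the full `DaylightSavingsOffset` computation. A hedged C++ rendering of the same single-range cache (the miss handler and the interval-widening policy are stand-ins, not the JS object's actual behavior):

#include <cstdio>

// Stand-in for the expensive offset computation.
static double DaylightSavingsOffset(double time_ms) { return 0.0; }

struct DstOffsetCache {
  double start;   // cached interval is [start, end], initially empty
  double end;
  double offset;  // DST offset valid on that interval
  DstOffsetCache() : start(1.0), end(0.0), offset(0.0) {}

  double Get(double time_ms) {
    if (start <= time_ms && time_ms <= end) return offset;  // fast hit
    offset = DaylightSavingsOffset(time_ms);                // slow miss
    start = end = time_ms;  // a real cache would widen the interval
    return offset;
  }
};

int main() {
  DstOffsetCache cache;
  std::printf("%f\n", cache.Get(1.2e12));  // miss: computes and caches
  std::printf("%f\n", cache.Get(1.2e12));  // hit: no recomputation
  return 0;
}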
@@ -254,27 +238,6 @@ function UTC(time) {
}
-// ECMA 262 - 15.9.1.10
-function HourFromTime(time) {
- return Modulo(FLOOR(time / msPerHour), HoursPerDay);
-}
-
-
-function MinFromTime(time) {
- return Modulo(FLOOR(time / msPerMinute), MinutesPerHour);
-}
-
-
-function SecFromTime(time) {
- return Modulo(FLOOR(time / msPerSecond), SecondsPerMinute);
-}
-
-
-function msFromTime(time) {
- return Modulo(time, msPerSecond);
-}
-
-
// ECMA 262 - 15.9.1.11
function MakeTime(hour, min, sec, ms) {
if (!$isFinite(hour)) return $NaN;
@@ -468,7 +431,7 @@ var Date_cache = {
value = DateParse(year);
if (!NUMBER_IS_NAN(value)) {
cache.time = value;
- cache.year = YearFromTime(LocalTimeNoCheck(value));
+ cache.year = YEAR_FROM_TIME(LocalTimeNoCheck(value));
cache.string = year;
}
}
@@ -508,60 +471,59 @@ function GetTimeFrom(aDate) {
return DATE_VALUE(aDate);
}
-
function GetMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return msFromTime(LocalTimeNoCheck(t));
+ return MS_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMillisecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return msFromTime(t);
+ return MS_FROM_TIME(t);
}
function GetSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return SecFromTime(LocalTimeNoCheck(t));
+ return SEC_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCSecondsFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return SecFromTime(t);
+ return SEC_FROM_TIME(t);
}
function GetMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MinFromTime(LocalTimeNoCheck(t));
+ return MIN_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMinutesFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MinFromTime(t);
+ return MIN_FROM_TIME(t);
}
function GetHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return HourFromTime(LocalTimeNoCheck(t));
+ return HOUR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCHoursFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return HourFromTime(t);
+ return HOUR_FROM_TIME(t);
}
@@ -570,42 +532,42 @@ function GetFullYearFrom(aDate) {
if (NUMBER_IS_NAN(t)) return t;
var cache = Date_cache;
if (cache.time === t) return cache.year;
- return YearFromTime(LocalTimeNoCheck(t));
+ return YEAR_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCFullYearFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return YearFromTime(t);
+ return YEAR_FROM_TIME(t);
}
function GetMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(LocalTimeNoCheck(t));
+ return MONTH_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCMonthFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return MonthFromTime(t);
+ return MONTH_FROM_TIME(t);
}
function GetDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return DateFromTime(LocalTimeNoCheck(t));
+ return DATE_FROM_TIME(LocalTimeNoCheck(t));
}
function GetUTCDateFrom(aDate) {
var t = DATE_VALUE(aDate);
if (NUMBER_IS_NAN(t)) return t;
- return DateFromTime(t);
+ return DATE_FROM_TIME(t);
}
@@ -622,7 +584,7 @@ function TwoDigitString(value) {
function DateString(time) {
- var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+ var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return WeekDays[WeekDay(time)] + ' '
+ Months[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ' '
@@ -635,7 +597,7 @@ var LongMonths = ['January', 'February', 'March', 'April', 'May', 'June', 'July'
function LongDateString(time) {
- var YMD = FromJulianDay(Day(time) + kDayZeroInJulianDay);
+ var YMD = FromJulianDay(DAY(time) + kDayZeroInJulianDay);
return LongWeekDays[WeekDay(time)] + ', '
+ LongMonths[YMD.month] + ' '
+ TwoDigitString(YMD.date) + ', '
@@ -644,9 +606,9 @@ function LongDateString(time) {
function TimeString(time) {
- return TwoDigitString(HourFromTime(time)) + ':'
- + TwoDigitString(MinFromTime(time)) + ':'
- + TwoDigitString(SecFromTime(time));
+ return TwoDigitString(HOUR_FROM_TIME(time)) + ':'
+ + TwoDigitString(MIN_FROM_TIME(time)) + ':'
+ + TwoDigitString(SEC_FROM_TIME(time));
}
@@ -892,8 +854,8 @@ function DateSetTime(ms) {
function DateSetMilliseconds(ms) {
var t = LocalTime(DATE_VALUE(this));
ms = ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -901,8 +863,8 @@ function DateSetMilliseconds(ms) {
function DateSetUTCMilliseconds(ms) {
var t = DATE_VALUE(this);
ms = ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), SecFromTime(t), ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), SEC_FROM_TIME(t), ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -911,8 +873,8 @@ function DateSetSeconds(sec, ms) {
var t = LocalTime(DATE_VALUE(this));
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -921,8 +883,8 @@ function DateSetUTCSeconds(sec, ms) {
var t = DATE_VALUE(this);
sec = ToNumber(sec);
ms = %_ArgumentsLength() < 2 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), MinFromTime(t), sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), MIN_FROM_TIME(t), sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -933,8 +895,8 @@ function DateSetMinutes(min, sec, ms) {
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -945,8 +907,8 @@ function DateSetUTCMinutes(min, sec, ms) {
var argc = %_ArgumentsLength();
sec = argc < 2 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 3 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
- var time = MakeTime(HourFromTime(t), min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ var time = MakeTime(HOUR_FROM_TIME(t), min, sec, ms);
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -959,7 +921,7 @@ function DateSetHours(hour, min, sec, ms) {
sec = argc < 3 ? GetSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(UTC(MakeDate(Day(t), time))));
+ return %_SetValueOf(this, TimeClip(UTC(MakeDate(DAY(t), time))));
}
@@ -972,7 +934,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
sec = argc < 3 ? GetUTCSecondsFrom(this) : ToNumber(sec);
ms = argc < 4 ? GetUTCMillisecondsFrom(this) : ToNumber(ms);
var time = MakeTime(hour, min, sec, ms);
- return %_SetValueOf(this, TimeClip(MakeDate(Day(t), time)));
+ return %_SetValueOf(this, TimeClip(MakeDate(DAY(t), time)));
}
@@ -980,7 +942,7 @@ function DateSetUTCHours(hour, min, sec, ms) {
function DateSetDate(date) {
var t = LocalTime(DATE_VALUE(this));
date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -989,7 +951,7 @@ function DateSetDate(date) {
function DateSetUTCDate(date) {
var t = DATE_VALUE(this);
date = ToNumber(date);
- var day = MakeDay(YearFromTime(t), MonthFromTime(t), date);
+ var day = MakeDay(YEAR_FROM_TIME(t), MONTH_FROM_TIME(t), date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -999,7 +961,7 @@ function DateSetMonth(month, date) {
var t = LocalTime(DATE_VALUE(this));
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetDateFrom(this) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
+ var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1009,7 +971,7 @@ function DateSetUTCMonth(month, date) {
var t = DATE_VALUE(this);
month = ToNumber(month);
date = %_ArgumentsLength() < 2 ? GetUTCDateFrom(this) : ToNumber(date);
- var day = MakeDay(YearFromTime(t), month, date);
+ var day = MakeDay(YEAR_FROM_TIME(t), month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -1020,8 +982,8 @@ function DateSetFullYear(year, month, date) {
t = NUMBER_IS_NAN(t) ? 0 : LocalTimeNoCheck(t);
year = ToNumber(year);
var argc = %_ArgumentsLength();
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+ date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1033,8 +995,8 @@ function DateSetUTCFullYear(year, month, date) {
if (NUMBER_IS_NAN(t)) t = 0;
var argc = %_ArgumentsLength();
year = ToNumber(year);
- month = argc < 2 ? MonthFromTime(t) : ToNumber(month);
- date = argc < 3 ? DateFromTime(t) : ToNumber(date);
+ month = argc < 2 ? MONTH_FROM_TIME(t) : ToNumber(month);
+ date = argc < 3 ? DATE_FROM_TIME(t) : ToNumber(date);
var day = MakeDay(year, month, date);
return %_SetValueOf(this, TimeClip(MakeDate(day, TimeWithinDay(t))));
}
@@ -1046,9 +1008,9 @@ function DateToUTCString() {
if (NUMBER_IS_NAN(t)) return kInvalidDate;
// Return UTC string of the form: Sat, 31 Jan 1970 23:00:00 GMT
return WeekDays[WeekDay(t)] + ', '
- + TwoDigitString(DateFromTime(t)) + ' '
- + Months[MonthFromTime(t)] + ' '
- + YearFromTime(t) + ' '
+ + TwoDigitString(DATE_FROM_TIME(t)) + ' '
+ + Months[MONTH_FROM_TIME(t)] + ' '
+ + YEAR_FROM_TIME(t) + ' '
+ TimeString(t) + ' GMT';
}
@@ -1057,7 +1019,7 @@ function DateToUTCString() {
function DateGetYear() {
var t = DATE_VALUE(this);
if (NUMBER_IS_NAN(t)) return $NaN;
- return YearFromTime(LocalTimeNoCheck(t)) - 1900;
+ return YEAR_FROM_TIME(LocalTimeNoCheck(t)) - 1900;
}
@@ -1069,7 +1031,7 @@ function DateSetYear(year) {
if (NUMBER_IS_NAN(year)) return %_SetValueOf(this, $NaN);
year = (0 <= TO_INTEGER(year) && TO_INTEGER(year) <= 99)
? 1900 + TO_INTEGER(year) : year;
- var day = MakeDay(year, MonthFromTime(t), DateFromTime(t));
+ var day = MakeDay(year, MONTH_FROM_TIME(t), DATE_FROM_TIME(t));
return %_SetValueOf(this, TimeClip(UTC(MakeDate(day, TimeWithinDay(t)))));
}
@@ -1086,16 +1048,19 @@ function DateToGMTString() {
}
-function PadInt(n) {
- // Format integers to have at least two digits.
- return n < 10 ? '0' + n : n;
+function PadInt(n, digits) {
+ if (digits == 1) return n;
+ return n < MathPow(10, digits - 1) ? '0' + PadInt(n, digits - 1) : n;
}
function DateToISOString() {
- return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
- '-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
- ':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
+ var t = DATE_VALUE(this);
+ if (NUMBER_IS_NAN(t)) return kInvalidDate;
+ return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1, 2) +
+ '-' + PadInt(this.getUTCDate(), 2) + 'T' + PadInt(this.getUTCHours(), 2) +
+ ':' + PadInt(this.getUTCMinutes(), 2) + ':' + PadInt(this.getUTCSeconds(), 2) +
+ '.' + PadInt(this.getUTCMilliseconds(), 3) +
'Z';
}
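Editor's note: the generalized `PadInt(n, digits)` recurses on the digit count, prepending a '0' whenever n has fewer than `digits` digits, so `PadInt(7, 2)` yields '07' and `PadInt(9, 3)` yields '009', which is exactly what the new three-digit millisecond field needs. A non-authoritative C++ equivalent of the recursion:

#include <cmath>
#include <iostream>
#include <string>

// Mirrors the JS PadInt: left-pad n to at least `digits` digits.
std::string PadInt(int n, int digits) {
  if (digits == 1) return std::to_string(n);
  return n < std::pow(10, digits - 1) ? "0" + PadInt(n, digits - 1)
                                      : std::to_string(n);
}

int main() {
  std::cout << PadInt(7, 2) << "\n";    // "07"
  std::cout << PadInt(9, 3) << "\n";    // "009"
  std::cout << PadInt(123, 3) << "\n";  // "123"
  return 0;
}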
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 2c4552eff..fbe09391e 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -2483,7 +2483,24 @@ Handle<Object> Debugger::Call(Handle<JSFunction> fun,
}
-bool Debugger::StartAgent(const char* name, int port) {
+static void StubMessageHandler2(const v8::Debug::Message& message) {
+ // Simply ignore message.
+}
+
+
+bool Debugger::StartAgent(const char* name, int port,
+ bool wait_for_connection) {
+ if (wait_for_connection) {
+ // Suspend V8 if it is already running or set V8 to suspend whenever
+ // it starts.
+    // Provide a stub message handler; without any message handler V8
+    // auto-continues each suspend, which is not what we want here.
+    // Once suspended, V8 stays suspended indefinitely until a remote
+    // debugger connects and issues a "continue" command.
+ Debugger::message_handler_ = StubMessageHandler2;
+ v8::Debug::DebugBreak();
+ }
+
if (Socket::Setup()) {
agent_ = new DebuggerAgent(name, port);
agent_->Start();
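Editor's note: for an embedder, the effect of the new parameter is that no script executes before a remote debugger attaches. A self-contained sketch with a stubbed `Debugger` class (the name, port, and stub body are illustrative; only the call shape matches the API above):

#include <cstdio>

// Stand-in for the internal Debugger class; only the call shape matters.
struct Debugger {
  static bool StartAgent(const char* name, int port,
                         bool wait_for_connection = false) {
    std::printf("agent \"%s\" on port %d (wait=%d)\n",
                name, port, wait_for_connection);
    return true;
  }
};

int main() {
  // With wait_for_connection == true the real agent keeps V8 suspended
  // until a remote debugger attaches and sends a "continue" command.
  if (!Debugger::StartAgent("node-debugger", 5858, true)) {
    std::printf("socket setup failed; running without the agent\n");
  }
  return 0;
}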
diff --git a/deps/v8/src/debug.h b/deps/v8/src/debug.h
index 24f0db413..c37e08b38 100644
--- a/deps/v8/src/debug.h
+++ b/deps/v8/src/debug.h
@@ -636,7 +636,8 @@ class Debugger {
bool* pending_exception);
// Start the debugger agent listening on the provided port.
- static bool StartAgent(const char* name, int port);
+ static bool StartAgent(const char* name, int port,
+ bool wait_for_connection = false);
// Stop the debugger agent.
static void StopAgent();
diff --git a/deps/v8/src/fast-codegen.cc b/deps/v8/src/fast-codegen.cc
index b15a67355..455dd5fab 100644
--- a/deps/v8/src/fast-codegen.cc
+++ b/deps/v8/src/fast-codegen.cc
@@ -67,13 +67,47 @@ int FastCodeGenerator::SlotOffset(Slot* slot) {
case Slot::LOCAL:
offset += JavaScriptFrameConstants::kLocal0Offset;
break;
- default:
+ case Slot::CONTEXT:
+ case Slot::LOOKUP:
UNREACHABLE();
}
return offset;
}
+void FastCodeGenerator::Apply(Expression::Context context, Register reg) {
+ switch (context) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ break;
+ case Expression::kValue:
+ __ push(reg);
+ break;
+ case Expression::kTest:
+ TestAndBranch(reg, true_label_, false_label_);
+ break;
+ case Expression::kValueTest: {
+ Label discard;
+ __ push(reg);
+ TestAndBranch(reg, true_label_, &discard);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
+ break;
+ }
+ case Expression::kTestValue: {
+ Label discard;
+ __ push(reg);
+ TestAndBranch(reg, &discard, false_label_);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ }
+ }
+}
+
+
void FastCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
int length = declarations->length();
@@ -162,7 +196,7 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
- case Expression::kEffect: // Fall through.
+ case Expression::kEffect:
case Expression::kTest:
// The value of the left subexpression is not needed.
expected = Expression::kTest;
@@ -192,36 +226,33 @@ void FastCodeGenerator::EmitLogicalOperation(BinaryOperation* expr) {
#endif
Label eval_right, done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
- // Set up the appropriate context for the left subexpression based on the
- // operation and our own context.
+ // Set up the appropriate context for the left subexpression based
+ // on the operation and our own context. Initially assume we can
+ // inherit both true and false labels from our context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
if (expr->op() == Token::OR) {
- // If there is no usable true label in the OR expression's context, use
- // the end of this expression, otherwise inherit the same true label.
+ // If we are not in some kind of a test context, we did not inherit a
+ // true label from our context. Use the end of the expression.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
- true_label_ = &done;
+ if_true = &done;
}
- // The false label is the label of the second subexpression.
- false_label_ = &eval_right;
+ // The false label is the label of the right subexpression.
+ if_false = &eval_right;
} else {
ASSERT_EQ(Token::AND, expr->op());
- // The true label is the label of the second subexpression.
- true_label_ = &eval_right;
- // If there is no usable false label in the AND expression's context,
- // use the end of the expression, otherwise inherit the same false
- // label.
+ // The true label is the label of the right subexpression.
+ if_true = &eval_right;
+ // If we are not in some kind of a test context, we did not inherit a
+ // false label from our context. Use the end of the expression.
if (expr->context() == Expression::kEffect ||
expr->context() == Expression::kValue) {
- false_label_ = &done;
+ if_false = &done;
}
}
-
- Visit(expr->left());
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->left(), if_true, if_false);
__ bind(&eval_right);
Visit(expr->right());
@@ -254,19 +285,11 @@ void FastCodeGenerator::VisitEmptyStatement(EmptyStatement* stmt) {
void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
Comment cmnt(masm_, "[ IfStatement");
- // Expressions cannot recursively enter statements, there are no labels in
- // the state.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
+ SetStatementPosition(stmt);
Label then_part, else_part, done;
// Do not worry about optimizing for empty then or else bodies.
- true_label_ = &then_part;
- false_label_ = &else_part;
- ASSERT(stmt->condition()->context() == Expression::kTest);
- Visit(stmt->condition());
- true_label_ = NULL;
- false_label_ = NULL;
+ VisitForControl(stmt->condition(), &then_part, &else_part);
__ bind(&then_part);
Visit(stmt->then_statement());
@@ -281,6 +304,7 @@ void FastCodeGenerator::VisitIfStatement(IfStatement* stmt) {
void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
Comment cmnt(masm_, "[ ContinueStatement");
+ SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsContinueTarget(stmt->target())) {
@@ -296,6 +320,7 @@ void FastCodeGenerator::VisitContinueStatement(ContinueStatement* stmt) {
void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
Comment cmnt(masm_, "[ BreakStatement");
+ SetStatementPosition(stmt);
NestedStatement* current = nesting_stack_;
int stack_depth = 0;
while (!current->IsBreakTarget(stmt->target())) {
@@ -311,6 +336,7 @@ void FastCodeGenerator::VisitBreakStatement(BreakStatement* stmt) {
void FastCodeGenerator::VisitReturnStatement(ReturnStatement* stmt) {
Comment cmnt(masm_, "[ ReturnStatement");
+ SetStatementPosition(stmt);
Expression* expr = stmt->expression();
// Complete the statement based on the type of the subexpression.
if (expr->AsLiteral() != NULL) {
@@ -372,6 +398,7 @@ void FastCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
+ SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@@ -384,17 +411,8 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- // We are not in an expression context because we have been compiling
- // statements. Set up a test expression context for the condition.
__ bind(loop_statement.continue_target());
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
- true_label_ = &body;
- false_label_ = loop_statement.break_target();
- ASSERT(stmt->cond()->context() == Expression::kTest);
- Visit(stmt->cond());
- true_label_ = NULL;
- false_label_ = NULL;
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
@@ -409,6 +427,7 @@ void FastCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
Comment cmnt(masm_, "[ WhileStatement");
+ SetStatementPosition(stmt);
Label body, stack_limit_hit, stack_check_success;
Iteration loop_statement(this, stmt);
@@ -425,16 +444,7 @@ void FastCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
__ StackLimitCheck(&stack_limit_hit);
__ bind(&stack_check_success);
- // We are not in an expression context because we have been compiling
- // statements. Set up a test expression context for the condition.
- ASSERT_EQ(NULL, true_label_);
- ASSERT_EQ(NULL, false_label_);
- true_label_ = &body;
- false_label_ = loop_statement.break_target();
- ASSERT(stmt->cond()->context() == Expression::kTest);
- Visit(stmt->cond());
- true_label_ = NULL;
- false_label_ = NULL;
+ VisitForControl(stmt->cond(), &body, loop_statement.break_target());
__ bind(&stack_limit_hit);
StackCheckStub stack_stub;
@@ -457,11 +467,52 @@ void FastCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
void FastCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ TryCatchStatement");
+ SetStatementPosition(stmt);
+ // The try block adds a handler to the exception handler chain
+ // before entering, and removes it again when exiting normally.
+ // If an exception is thrown during execution of the try block,
+  // control is passed to the handler, which consumes the handler entry.
+  // At that point the exception is in a register; we store it in
+  // the temporary local variable (printed as ".catch-var") before
+ // executing the catch block. The catch block has been rewritten
+ // to introduce a new scope to bind the catch variable and to remove
+ // that scope again afterwards.
+
+ Label try_handler_setup, catch_entry, done;
+
+ __ Call(&try_handler_setup);
+ // Try handler code, exception in result register.
+
+ // Store exception in local .catch variable before executing catch block.
+ {
+ // The catch variable is *always* a variable proxy for a local variable.
+ Variable* catch_var = stmt->catch_var()->AsVariableProxy()->AsVariable();
+ ASSERT_NOT_NULL(catch_var);
+ Slot* variable_slot = catch_var->slot();
+ ASSERT_NOT_NULL(variable_slot);
+ ASSERT_EQ(Slot::LOCAL, variable_slot->type());
+ StoreToFrameField(SlotOffset(variable_slot), result_register());
+ }
+
+ Visit(stmt->catch_block());
+ __ jmp(&done);
+
+ // Try block code. Sets up the exception handler chain.
+ __ bind(&try_handler_setup);
+ {
+ TryCatch try_block(this, &catch_entry);
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_CATCH_HANDLER);
+ Visit(stmt->try_block());
+ __ PopTryHandler();
+ }
+ __ bind(&done);
}
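Editor's note: the layout this emits is inverted on purpose. The function body begins with a call into the try-handler setup so that the catch sequence sits at that call's return address, which is where a thrown exception lands with the value already in the result register. A rough schematic of the emitted shape (illustrative labels, not real assembly):

//   Call(try_handler_setup)       // a thrown exception returns here,
//   store result -> .catch-var    // value in the result register
//   <catch block>
//   jmp done
// try_handler_setup:
//   PushTryHandler(TRY_CATCH_HANDLER)
//   <try block>
//   PopTryHandler()               // normal completion falls through
// done: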
void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+ Comment cmnt(masm_, "[ TryFinallyStatement");
+ SetStatementPosition(stmt);
// Try finally is compiled by setting up a try-handler on the stack while
// executing the try body, and removing it again afterwards.
//
@@ -474,7 +525,7 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// its outward control transfer.
// 3. by exiting the try-block with a thrown exception.
// This can happen in nested function calls. It traverses the try-handler
- // chaing and consumes the try-handler entry before jumping to the
+ // chain and consumes the try-handler entry before jumping to the
// handler code. The handler code then calls the finally-block before
// rethrowing the exception.
//
@@ -497,14 +548,15 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
// is retained by the finally block.
// Call the finally block and then rethrow the exception.
__ Call(&finally_entry);
- ThrowException();
+ __ push(result_register());
+ __ CallRuntime(Runtime::kReThrow, 1);
}
__ bind(&finally_entry);
{
// Finally block implementation.
- EnterFinallyBlock();
Finally finally_block(this);
+ EnterFinallyBlock();
Visit(stmt->finally_block());
ExitFinallyBlock(); // Return to the calling code.
}
@@ -512,9 +564,9 @@ void FastCodeGenerator::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
__ bind(&try_handler_setup);
{
    // Set up the try handler (stack pointer registers).
- __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
TryFinally try_block(this, &finally_entry);
- VisitStatements(stmt->try_block()->statements());
+ __ PushTryHandler(IN_JAVASCRIPT, TRY_FINALLY_HANDLER);
+ Visit(stmt->try_block());
__ PopTryHandler();
}
// Execute the finally block on the way out.
@@ -546,14 +598,7 @@ void FastCodeGenerator::VisitConditional(Conditional* expr) {
Label true_case, false_case, done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
-
- true_label_ = &true_case;
- false_label_ = &false_case;
- Visit(expr->condition());
- true_label_ = saved_true;
- false_label_ = saved_false;
+ VisitForControl(expr->condition(), &true_case, &false_case);
__ bind(&true_case);
Visit(expr->then_expression());
@@ -581,7 +626,7 @@ void FastCodeGenerator::VisitSlot(Slot* expr) {
void FastCodeGenerator::VisitLiteral(Literal* expr) {
Comment cmnt(masm_, "[ Literal");
- Move(expr->context(), expr);
+ Apply(expr->context(), expr);
}
@@ -634,7 +679,7 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
EmitNamedPropertyLoad(prop, Expression::kValue);
break;
case KEYED_PROPERTY:
- EmitKeyedPropertyLoad(Expression::kValue);
+ EmitKeyedPropertyLoad(prop, Expression::kValue);
break;
}
}
@@ -652,7 +697,8 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
// Store the value.
switch (assign_type) {
case VARIABLE:
- EmitVariableAssignment(expr);
+ EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+ expr->context());
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
@@ -665,12 +711,29 @@ void FastCodeGenerator::VisitAssignment(Assignment* expr) {
void FastCodeGenerator::VisitCatchExtensionObject(CatchExtensionObject* expr) {
- UNREACHABLE();
+ // Call runtime routine to allocate the catch extension object and
+ // assign the exception value to the catch variable.
+ Comment cmnt(masm_, "[ CatchExtensionObject");
+
+ // Push key string.
+ ASSERT_EQ(Expression::kValue, expr->key()->context());
+ Visit(expr->key());
+ ASSERT_EQ(Expression::kValue, expr->value()->context());
+ Visit(expr->value());
+
+ // Create catch extension object.
+ __ CallRuntime(Runtime::kCreateCatchExtensionObject, 2);
+
+ __ push(result_register());
}
void FastCodeGenerator::VisitThrow(Throw* expr) {
- UNREACHABLE();
+ Comment cmnt(masm_, "[ Throw");
+ Visit(expr->exception());
+ // Exception is on stack.
+ __ CallRuntime(Runtime::kThrow, 1);
+ // Never returns here.
}
diff --git a/deps/v8/src/fast-codegen.h b/deps/v8/src/fast-codegen.h
index 54f0df115..ecac8e7fa 100644
--- a/deps/v8/src/fast-codegen.h
+++ b/deps/v8/src/fast-codegen.h
@@ -212,26 +212,48 @@ class FastCodeGenerator: public AstVisitor {
int SlotOffset(Slot* slot);
- void Move(Expression::Context destination, Register source);
- void Move(Expression::Context destination, Slot* source, Register scratch);
- void Move(Expression::Context destination, Literal* source);
+
+ // Emit code to complete the evaluation of an expression based on its
+ // expression context and given its value is in a register, non-lookup
+ // slot, or a literal.
+ void Apply(Expression::Context context, Register reg);
+ void Apply(Expression::Context context, Slot* slot, Register scratch);
+ void Apply(Expression::Context context, Literal* lit);
+
+ // Emit code to complete the evaluation of an expression based on its
+ // expression context and given its value is on top of the stack.
+ void ApplyTOS(Expression::Context context);
+
+ // Emit code to discard count elements from the top of stack, then
+ // complete the evaluation of an expression based on its expression
+ // context and given its value is in a register.
+ void DropAndApply(int count, Expression::Context context, Register reg);
+
void Move(Slot* dst, Register source, Register scratch1, Register scratch2);
void Move(Register dst, Slot* source);
- // Templated to allow for Operand on intel and MemOperand on ARM.
- template <typename MemoryLocation>
- MemoryLocation CreateSlotOperand(Slot* slot, Register scratch);
-
- // Drop the TOS, and store source to destination.
- // If destination is TOS, just overwrite TOS with source.
- void DropAndMove(Expression::Context destination,
- Register source,
- int drop_count = 1);
+  // Returns an operand used to read/write a known (i.e. non-LOOKUP) slot.
+ // May emit code to traverse the context chain, destroying the scratch
+ // register.
+ MemOperand EmitSlotSearch(Slot* slot, Register scratch);
// Test the JavaScript value in source as if in a test context, compile
// control flow to a pair of labels.
void TestAndBranch(Register source, Label* true_label, Label* false_label);
+ void VisitForControl(Expression* expr, Label* if_true, Label* if_false) {
+ ASSERT(expr->context() == Expression::kTest ||
+ expr->context() == Expression::kValueTest ||
+ expr->context() == Expression::kTestValue);
+ Label* saved_true = true_label_;
+ Label* saved_false = false_label_;
+ true_label_ = if_true;
+ false_label_ = if_false;
+ Visit(expr);
+ true_label_ = saved_true;
+ false_label_ = saved_false;
+ }
+
void VisitDeclarations(ZoneList<Declaration*>* declarations);
void DeclareGlobals(Handle<FixedArray> pairs);
@@ -247,13 +269,13 @@ class FastCodeGenerator: public AstVisitor {
// Platform-specific support for compiling assignments.
- // Load a value from a named property and push the result on the stack.
+ // Load a value from a named property.
// The receiver is left on the stack by the IC.
void EmitNamedPropertyLoad(Property* expr, Expression::Context context);
- // Load a value from a named property and push the result on the stack.
+ // Load a value from a keyed property.
  // The receiver and the key are left on the stack by the IC.
- void EmitKeyedPropertyLoad(Expression::Context context);
+ void EmitKeyedPropertyLoad(Property* expr, Expression::Context context);
// Apply the compound assignment operator. Expects both operands on top
// of the stack.
@@ -261,7 +283,7 @@ class FastCodeGenerator: public AstVisitor {
// Complete a variable assignment. The right-hand-side value is expected
// on top of the stack.
- void EmitVariableAssignment(Assignment* expr);
+ void EmitVariableAssignment(Variable* var, Expression::Context context);
// Complete a named property assignment. The receiver and right-hand-side
// value are expected on top of the stack.
@@ -279,7 +301,6 @@ class FastCodeGenerator: public AstVisitor {
// Non-local control flow support.
void EnterFinallyBlock();
void ExitFinallyBlock();
- void ThrowException();
// Loop nesting counter.
int loop_depth() { return loop_depth_; }
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 88fda123b..f912fc524 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -198,6 +198,9 @@ DEFINE_bool(cleanup_caches_in_maps_at_gc, true,
DEFINE_bool(canonicalize_object_literal_maps, true,
"Canonicalize maps for object literals.")
+DEFINE_bool(use_big_map_space, true,
+ "Use big map space, but don't compact if it grew too big.")
+
// mksnapshot.cc
DEFINE_bool(h, false, "print this message")
DEFINE_bool(new_snapshot, true, "use new snapshot implementation")
@@ -228,6 +231,7 @@ DEFINE_bool(preemption, false,
// Regexp
DEFINE_bool(trace_regexps, false, "trace regexp execution")
DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_bool(regexp_entry_native, true, "use native code to enter regexp")
// Testing flags test/cctest/test-{flags,api,serialization}.cc
DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
@@ -325,6 +329,9 @@ DEFINE_bool(collect_heap_spill_statistics, false,
"(requires heap_stats)")
// Regexp
+DEFINE_bool(regexp_possessive_quantifier,
+ false,
+ "enable possessive quantifier syntax for testing")
DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
DEFINE_bool(trace_regexp_assembler,
false,
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 7c327dd32..2f90a316e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -306,7 +306,7 @@ void StackHandler::Cook(Code* code) {
void StackHandler::Uncook(Code* code) {
- ASSERT(MarkCompactCollector::IsCompacting());
+ ASSERT(MarkCompactCollector::HasCompacted());
set_pc(code->instruction_start() + OffsetFrom(pc()));
ASSERT(code->contains(pc()));
}
@@ -336,7 +336,7 @@ void StackFrame::CookFramesForThread(ThreadLocalTop* thread) {
void StackFrame::UncookFramesForThread(ThreadLocalTop* thread) {
  // Frames are only uncooked after the collector has compacted and thus
  // moved code around.
- ASSERT(MarkCompactCollector::IsCompacting());
+ ASSERT(MarkCompactCollector::HasCompacted());
ASSERT(thread->stack_is_cooked());
for (StackFrameIterator it(thread); !it.done(); it.Advance()) {
it.frame()->Uncook();
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 00fad547c..bd4f86bb9 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -54,7 +54,8 @@ Object* Heap::AllocateRaw(int size_in_bytes,
ASSERT(allocation_allowed_ && gc_state_ == NOT_IN_GC);
ASSERT(space != NEW_SPACE ||
retry_space == OLD_POINTER_SPACE ||
- retry_space == OLD_DATA_SPACE);
+ retry_space == OLD_DATA_SPACE ||
+ retry_space == LO_SPACE);
#ifdef DEBUG
if (FLAG_gc_interval >= 0 &&
!disallow_allocation_failure_ &&
@@ -196,12 +197,23 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
// other object types are promoted to old pointer space. We do not use
// object->IsHeapNumber() and object->IsSeqString() because we already
// know that object has the heap object tag.
- ASSERT((type != CODE_TYPE) && (type != MAP_TYPE));
- bool has_pointers =
- type != HEAP_NUMBER_TYPE &&
- (type >= FIRST_NONSTRING_TYPE ||
- (type & kStringRepresentationMask) != kSeqStringTag);
- return has_pointers ? OLD_POINTER_SPACE : OLD_DATA_SPACE;
+
+ // These objects are never allocated in new space.
+ ASSERT(type != MAP_TYPE);
+ ASSERT(type != CODE_TYPE);
+ ASSERT(type != ODDBALL_TYPE);
+ ASSERT(type != JS_GLOBAL_PROPERTY_CELL_TYPE);
+
+ if (type < FIRST_NONSTRING_TYPE) {
+ // There are three string representations: sequential strings, cons
+ // strings, and external strings. Only cons strings contain
+ // non-map-word pointers to heap objects.
+ return ((type & kStringRepresentationMask) == kConsStringTag)
+ ? OLD_POINTER_SPACE
+ : OLD_DATA_SPACE;
+ } else {
+ return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
+ }
}
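Editor's note: concretely, the new rule places promoted objects as follows (placements inferred from the code and comments above; the type names are V8's):

// seq string       -> OLD_DATA_SPACE     (characters only, no pointers)
// external string  -> OLD_DATA_SPACE     (payload lives outside the heap)
// cons string      -> OLD_POINTER_SPACE  (holds pointers to its two halves)
// heap number      -> OLD_DATA_SPACE     (type <= LAST_DATA_TYPE)
// JS object        -> OLD_POINTER_SPACE  (type >  LAST_DATA_TYPE)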
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 7a66038b6..fba2e87c2 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -479,6 +479,65 @@ static void VerifySymbolTable() {
}
+void Heap::ReserveSpace(
+ int new_space_size,
+ int pointer_space_size,
+ int data_space_size,
+ int code_space_size,
+ int map_space_size,
+ int cell_space_size,
+ int large_object_size) {
+ NewSpace* new_space = Heap::new_space();
+ PagedSpace* old_pointer_space = Heap::old_pointer_space();
+ PagedSpace* old_data_space = Heap::old_data_space();
+ PagedSpace* code_space = Heap::code_space();
+ PagedSpace* map_space = Heap::map_space();
+ PagedSpace* cell_space = Heap::cell_space();
+ LargeObjectSpace* lo_space = Heap::lo_space();
+ bool gc_performed = true;
+ while (gc_performed) {
+ gc_performed = false;
+ if (!new_space->ReserveSpace(new_space_size)) {
+ Heap::CollectGarbage(new_space_size, NEW_SPACE);
+ gc_performed = true;
+ }
+ if (!old_pointer_space->ReserveSpace(pointer_space_size)) {
+ Heap::CollectGarbage(pointer_space_size, OLD_POINTER_SPACE);
+ gc_performed = true;
+ }
+ if (!(old_data_space->ReserveSpace(data_space_size))) {
+ Heap::CollectGarbage(data_space_size, OLD_DATA_SPACE);
+ gc_performed = true;
+ }
+ if (!(code_space->ReserveSpace(code_space_size))) {
+ Heap::CollectGarbage(code_space_size, CODE_SPACE);
+ gc_performed = true;
+ }
+ if (!(map_space->ReserveSpace(map_space_size))) {
+ Heap::CollectGarbage(map_space_size, MAP_SPACE);
+ gc_performed = true;
+ }
+ if (!(cell_space->ReserveSpace(cell_space_size))) {
+ Heap::CollectGarbage(cell_space_size, CELL_SPACE);
+ gc_performed = true;
+ }
+ // We add a slack-factor of 2 in order to have space for the remembered
+ // set and a series of large-object allocations that are only just larger
+ // than the page size.
+ large_object_size *= 2;
+ // The ReserveSpace method on the large object space checks how much
+ // we can expand the old generation. This includes expansion caused by
+ // allocation in the other spaces.
+ large_object_size += cell_space_size + map_space_size + code_space_size +
+ data_space_size + pointer_space_size;
+ if (!(lo_space->ReserveSpace(large_object_size))) {
+ Heap::CollectGarbage(large_object_size, LO_SPACE);
+ gc_performed = true;
+ }
+ }
+}
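Editor's note: the large-object request is deliberately pessimistic. Doubling covers the remembered set plus per-page rounding, and the other spaces' requests are added because each of them can also expand the old generation: reserving 64 KB of large objects alongside 16 KB in each of the five paged spaces asks the large object space for 64*2 + 5*16 = 208 KB. A sketch of just that arithmetic:

// Mirrors the padding applied to the large-object request (sizes in KB).
int PaddedLargeObjectRequest(int large, int pointer, int data,
                             int code, int map, int cell) {
  large *= 2;  // slack for the remembered set and page-size rounding
  // The other spaces can also expand the old generation, so count them too.
  return large + pointer + data + code + map + cell;
}
// PaddedLargeObjectRequest(64, 16, 16, 16, 16, 16) == 208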
+
+
void Heap::EnsureFromSpaceIsCommitted() {
if (new_space_.CommitFromSpaceIfNeeded()) return;
@@ -576,6 +635,8 @@ void Heap::MarkCompactPrologue(bool is_compacting) {
Top::MarkCompactPrologue(is_compacting);
ThreadManager::MarkCompactPrologue(is_compacting);
+
+ if (is_compacting) FlushNumberStringCache();
}
@@ -804,7 +865,8 @@ void Heap::ScavengeExternalStringTable() {
}
}
- ExternalStringTable::ShrinkNewStrings(last - start);
+ ASSERT(last <= end);
+ ExternalStringTable::ShrinkNewStrings(static_cast<int>(last - start));
}
@@ -1354,9 +1416,6 @@ Object* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(HeapNumber::kSize, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result;
@@ -1576,10 +1635,7 @@ bool Heap::CreateInitialObjects() {
CreateFixedStubs();
- // Allocate the number->string conversion cache
- obj = AllocateFixedArray(kNumberStringCacheSize * 2);
- if (obj->IsFailure()) return false;
- set_number_string_cache(FixedArray::cast(obj));
+ if (InitializeNumberStringCache()->IsFailure()) return false;
// Allocate cache for single character strings.
obj = AllocateFixedArray(String::kMaxAsciiCharCode+1);
@@ -1610,25 +1666,45 @@ bool Heap::CreateInitialObjects() {
}
+Object* Heap::InitializeNumberStringCache() {
+ // Compute the size of the number string cache based on the max heap size.
+ // max_semispace_size_ == 512 KB => number_string_cache_size = 32.
+ // max_semispace_size_ == 8 MB => number_string_cache_size = 16KB.
+ int number_string_cache_size = max_semispace_size_ / 512;
+ number_string_cache_size = Max(32, Min(16*KB, number_string_cache_size));
+ Object* obj = AllocateFixedArray(number_string_cache_size * 2);
+ if (!obj->IsFailure()) set_number_string_cache(FixedArray::cast(obj));
+ return obj;
+}
+
+
+void Heap::FlushNumberStringCache() {
+ // Flush the number to string cache.
+ int len = number_string_cache()->length();
+ for (int i = 0; i < len; i++) {
+ number_string_cache()->set_undefined(i);
+ }
+}
+
+
static inline int double_get_hash(double d) {
DoubleRepresentation rep(d);
- return ((static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32)) &
- (Heap::kNumberStringCacheSize - 1));
+ return static_cast<int>(rep.bits) ^ static_cast<int>(rep.bits >> 32);
}
static inline int smi_get_hash(Smi* smi) {
- return (smi->value() & (Heap::kNumberStringCacheSize - 1));
+ return smi->value();
}
-
Object* Heap::GetNumberStringCache(Object* number) {
int hash;
+ int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number));
+ hash = smi_get_hash(Smi::cast(number)) & mask;
} else {
- hash = double_get_hash(number->Number());
+ hash = double_get_hash(number->Number()) & mask;
}
Object* key = number_string_cache()->get(hash * 2);
if (key == number) {
@@ -1644,11 +1720,12 @@ Object* Heap::GetNumberStringCache(Object* number) {
void Heap::SetNumberStringCache(Object* number, String* string) {
int hash;
+ int mask = (number_string_cache()->length() >> 1) - 1;
if (number->IsSmi()) {
- hash = smi_get_hash(Smi::cast(number));
+ hash = smi_get_hash(Smi::cast(number)) & mask;
number_string_cache()->set(hash * 2, number, SKIP_WRITE_BARRIER);
} else {
- hash = double_get_hash(number->Number());
+ hash = double_get_hash(number->Number()) & mask;
number_string_cache()->set(hash * 2, number);
}
number_string_cache()->set(hash * 2 + 1, string);
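Editor's note: since the cache length is now chosen at runtime (a power of two, given a power-of-two semispace size and the clamping above), both lookups derive the mask from the array length instead of a compile-time constant. An array of length 2N holds N key/value pairs, entry i lives at slots 2i and 2i+1, and the mask is N - 1. A self-contained sketch of that layout and masking:

#include <cassert>
#include <cstdint>
#include <vector>

// Sketch: a cache laid out as [key0, value0, key1, value1, ...].
struct NumberStringCache {
  std::vector<intptr_t> slots;  // length 2 * N, N a power of two
  explicit NumberStringCache(int entries) : slots(2 * entries, 0) {}

  int Mask() const { return static_cast<int>(slots.size() >> 1) - 1; }

  void Set(intptr_t key, intptr_t value) {
    int hash = static_cast<int>(key) & Mask();
    slots[hash * 2] = key;
    slots[hash * 2 + 1] = value;
  }

  intptr_t Get(intptr_t key) const {
    int hash = static_cast<int>(key) & Mask();
    return slots[hash * 2] == key ? slots[hash * 2 + 1] : 0;
  }
};

int main() {
  NumberStringCache cache(32);    // 64 slots, mask == 31
  cache.Set(42, 1001);
  assert(cache.Get(42) == 1001);  // hit
  assert(cache.Get(74) == 0);     // same slot (74 & 31 == 10), key mismatch
  return 0;
}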
@@ -1762,7 +1839,6 @@ Object* Heap::AllocateProxy(Address proxy, PretenureFlag pretenure) {
// Statically ensure that it is safe to allocate proxies in paged spaces.
STATIC_ASSERT(Proxy::kSize <= Page::kMaxHeapObjectSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
- if (always_allocate()) space = OLD_DATA_SPACE;
Object* result = Allocate(proxy_map(), space);
if (result->IsFailure()) return result;
@@ -1902,8 +1978,7 @@ Object* Heap::AllocateConsString(String* first, String* second) {
Map* map = is_ascii ? cons_ascii_string_map() : cons_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_POINTER_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ConsString* cons_string = ConsString::cast(result);
WriteBarrierMode mode = cons_string->GetWriteBarrierMode();
@@ -1967,8 +2042,7 @@ Object* Heap::AllocateExternalStringFromAscii(
}
Map* map = external_ascii_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalAsciiString* external_string = ExternalAsciiString::cast(result);
@@ -1989,8 +2063,7 @@ Object* Heap::AllocateExternalStringFromTwoByte(
}
Map* map = Heap::external_string_map();
- Object* result = Allocate(map,
- always_allocate() ? OLD_DATA_SPACE : NEW_SPACE);
+ Object* result = Allocate(map, NEW_SPACE);
if (result->IsFailure()) return result;
ExternalTwoByteString* external_string = ExternalTwoByteString::cast(result);
@@ -2025,15 +2098,16 @@ Object* Heap::LookupSingleCharacterStringFromCode(uint16_t code) {
Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
if (pretenure == NOT_TENURED) {
return AllocateByteArray(length);
}
int size = ByteArray::SizeFor(length);
- AllocationSpace space =
- size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
-
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
-
+ Object* result = (size <= MaxObjectSizeInPagedSpace())
+ ? old_data_space_->AllocateRaw(size)
+ : lo_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@@ -2043,15 +2117,13 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* Heap::AllocateByteArray(int length) {
+ if (length < 0 || length > ByteArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
int size = ByteArray::SizeFor(length);
AllocationSpace space =
- size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = LO_SPACE;
-
+ (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<Array*>(result)->set_map(byte_array_map());
@@ -2076,12 +2148,7 @@ Object* Heap::AllocatePixelArray(int length,
uint8_t* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(PixelArray::kAlignedSize, space, OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<PixelArray*>(result)->set_map(pixel_array_map());
@@ -2097,14 +2164,9 @@ Object* Heap::AllocateExternalArray(int length,
void* external_pointer,
PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
Object* result = AllocateRaw(ExternalArray::kAlignedSize,
space,
OLD_DATA_SPACE);
-
if (result->IsFailure()) return result;
reinterpret_cast<ExternalArray*>(result)->set_map(
@@ -2193,9 +2255,12 @@ Object* Heap::CopyCode(Code* code) {
Object* Heap::Allocate(Map* map, AllocationSpace space) {
ASSERT(gc_state_ == NOT_IN_GC);
ASSERT(map->instance_type() != MAP_TYPE);
- Object* result = AllocateRaw(map->instance_size(),
- space,
- TargetSpaceId(map->instance_type()));
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space =
+ (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+ Object* result =
+ AllocateRaw(map->instance_size(), space, retry_space);
if (result->IsFailure()) return result;
HeapObject::cast(result)->set_map(map);
#ifdef ENABLE_LOGGING_AND_PROFILING
@@ -2383,7 +2448,6 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- if (always_allocate()) space = OLD_POINTER_SPACE;
Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj;
@@ -2658,12 +2722,16 @@ Map* Heap::SymbolMapForString(String* string) {
Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
int chars,
uint32_t hash_field) {
+ ASSERT(chars >= 0);
// Ensure the chars matches the number of characters in the buffer.
ASSERT(static_cast<unsigned>(chars) == buffer->Length());
// Determine whether the string is ascii.
bool is_ascii = true;
- while (buffer->has_more() && is_ascii) {
- if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) is_ascii = false;
+ while (buffer->has_more()) {
+ if (buffer->GetNext() > unibrow::Utf8::kMaxOneByteChar) {
+ is_ascii = false;
+ break;
+ }
}
buffer->Rewind();
@@ -2672,17 +2740,23 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Map* map;
if (is_ascii) {
+ if (chars > SeqAsciiString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
map = ascii_symbol_map();
size = SeqAsciiString::SizeFor(chars);
} else {
+ if (chars > SeqTwoByteString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
map = symbol_map();
size = SeqTwoByteString::SizeFor(chars);
}
// Allocate string.
- AllocationSpace space =
- (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
- Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ Object* result = (size > MaxObjectSizeInPagedSpace())
+ ? lo_space_->AllocateRaw(size)
+ : old_data_space_->AllocateRaw(size);
if (result->IsFailure()) return result;
reinterpret_cast<HeapObject*>(result)->set_map(map);
@@ -2702,22 +2776,28 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
+ if (length < 0 || length > SeqAsciiString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
int size = SeqAsciiString::SizeFor(length);
+ ASSERT(size <= SeqAsciiString::kMaxSize);
+
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace retry_space = OLD_DATA_SPACE;
- Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
- } else {
- if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (size > kMaxObjectSizeInNewSpace) {
+ // Allocate in large object space; the retry space will be ignored.
+ space = LO_SPACE;
+ } else if (size > MaxObjectSizeInPagedSpace()) {
+ // Allocate in new space, retry in large object space.
+ retry_space = LO_SPACE;
+ }
+ } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+ space = LO_SPACE;
}
+ Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@@ -2730,22 +2810,26 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
- AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-
- // New space can't cope with forced allocation.
- if (always_allocate()) space = OLD_DATA_SPACE;
-
+ if (length < 0 || length > SeqTwoByteString::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
int size = SeqTwoByteString::SizeFor(length);
+ ASSERT(size <= SeqTwoByteString::kMaxSize);
+ AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+ AllocationSpace retry_space = OLD_DATA_SPACE;
- Object* result = Failure::OutOfMemoryException();
if (space == NEW_SPACE) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRaw(size);
- } else {
- if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
- result = AllocateRaw(size, space, OLD_DATA_SPACE);
+ if (size > kMaxObjectSizeInNewSpace) {
+ // Allocate in large object space; the retry space will be ignored.
+ space = LO_SPACE;
+ } else if (size > MaxObjectSizeInPagedSpace()) {
+ // Allocate in new space, retry in large object space.
+ retry_space = LO_SPACE;
+ }
+ } else if (space == OLD_DATA_SPACE && size > MaxObjectSizeInPagedSpace()) {
+ space = LO_SPACE;
}
+ Object* result = AllocateRaw(size, space, retry_space);
if (result->IsFailure()) return result;
// Partially initialize the object.
@@ -2769,6 +2853,9 @@ Object* Heap::AllocateEmptyFixedArray() {
Object* Heap::AllocateRawFixedArray(int length) {
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
// Use the general function if we're forced to always allocate.
if (always_allocate()) return AllocateFixedArray(length, TENURED);
// Allocate the raw data for a fixed array.
@@ -2820,29 +2907,47 @@ Object* Heap::AllocateFixedArray(int length) {
Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+ ASSERT(length >= 0);
ASSERT(empty_fixed_array()->IsFixedArray());
+ if (length < 0 || length > FixedArray::kMaxLength) {
+ return Failure::OutOfMemoryException();
+ }
if (length == 0) return empty_fixed_array();
- // New space can't cope with forced allocation.
- if (always_allocate()) pretenure = TENURED;
-
+ AllocationSpace space =
+ (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
int size = FixedArray::SizeFor(length);
+ if (space == NEW_SPACE && size > kMaxObjectSizeInNewSpace) {
+ // Too big for new space.
+ space = LO_SPACE;
+ } else if (space == OLD_POINTER_SPACE &&
+ size > MaxObjectSizeInPagedSpace()) {
+ // Too big for old pointer space.
+ space = LO_SPACE;
+ }
+
+ // Specialize allocation for the space.
Object* result = Failure::OutOfMemoryException();
- if (pretenure != TENURED) {
- result = size <= kMaxObjectSizeInNewSpace
- ? new_space_.AllocateRaw(size)
- : lo_space_->AllocateRawFixedArray(size);
- }
- if (result->IsFailure()) {
- if (size > MaxObjectSizeInPagedSpace()) {
- result = lo_space_->AllocateRawFixedArray(size);
- } else {
- AllocationSpace space =
- (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
- result = AllocateRaw(size, space, OLD_POINTER_SPACE);
+ if (space == NEW_SPACE) {
+ // We cannot use Heap::AllocateRaw() because it will not properly
+ // allocate extra remembered set bits if always_allocate() is true and
+ // new space allocation fails.
+ result = new_space_.AllocateRaw(size);
+ if (result->IsFailure() && always_allocate()) {
+ if (size <= MaxObjectSizeInPagedSpace()) {
+ result = old_pointer_space_->AllocateRaw(size);
+ } else {
+ result = lo_space_->AllocateRawFixedArray(size);
+ }
}
- if (result->IsFailure()) return result;
+ } else if (space == OLD_POINTER_SPACE) {
+ result = old_pointer_space_->AllocateRaw(size);
+ } else {
+ ASSERT(space == LO_SPACE);
+ result = lo_space_->AllocateRawFixedArray(size);
}
+ if (result->IsFailure()) return result;
+
// Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result);
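The restructured AllocateFixedArray first classifies the request into exactly one space and then calls that space's allocator directly; only a failed new-space allocation under always_allocate() falls back to old pointer or large object space. A compressed sketch of the classification step (the size limits are invented stand-ins for kMaxObjectSizeInNewSpace and MaxObjectSizeInPagedSpace()):

    enum Space { NEW, OLD_POINTER, LARGE };

    const int kMaxNewSpaceObject = 512 * 1024;  // assumed limit, for illustration
    const int kMaxPagedObject = 8 * 1024;       // assumed limit, for illustration

    Space ClassifyFixedArray(int size, bool pretenured) {
      Space space = pretenured ? OLD_POINTER : NEW;
      if (space == NEW && size > kMaxNewSpaceObject) return LARGE;
      if (space == OLD_POINTER && size > kMaxPagedObject) return LARGE;
      return space;
    }
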
@@ -3437,7 +3542,10 @@ bool Heap::Setup(bool create_heap_objects) {
if (!code_space_->Setup(NULL, 0)) return false;
// Initialize map space.
- map_space_ = new MapSpace(kMaxMapSpaceSize, MAP_SPACE);
+ map_space_ = new MapSpace(FLAG_use_big_map_space
+ ? max_old_generation_size_
+ : (MapSpace::kMaxMapPageIndex + 1) * Page::kPageSize,
+ MAP_SPACE);
if (map_space_ == NULL) return false;
if (!map_space_->Setup(NULL, 0)) return false;
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index fdc04a8ea..1f044441a 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -269,7 +269,7 @@ class Heap : public AllStatic {
return reinterpret_cast<Address>(&always_allocate_scope_depth_);
}
static bool linear_allocation() {
- return linear_allocation_scope_depth_ != 0;
+ return linear_allocation_scope_depth_ != 0;
}
static Address* NewSpaceAllocationTopAddress() {
@@ -804,9 +804,27 @@ class Heap : public AllStatic {
// Rebuild remembered set in old and map spaces.
static void RebuildRSets();
+ // Update an old object's remembered set.
+ static int UpdateRSet(HeapObject* obj);
+
// Commits from space if it is uncommitted.
static void EnsureFromSpaceIsCommitted();
+ // Support for partial snapshots. After calling this we can allocate a
+ // certain number of bytes using only linear allocation (with a
+ // LinearAllocationScope and an AlwaysAllocateScope) without using freelists
+ // or causing a GC. It returns true of space was reserved or false if a GC is
+ // needed. For paged spaces the space requested must include the space wasted
+ // at the end of each page when allocating linearly.
+ static void ReserveSpace(
+ int new_space_size,
+ int pointer_space_size,
+ int data_space_size,
+ int code_space_size,
+ int map_space_size,
+ int cell_space_size,
+ int large_object_size);
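As the comment explains, this hook lets the partial-snapshot machinery guarantee a run of purely linear allocations. A hypothetical call site, with invented byte counts (remember that the paged-space figures must include the per-page waste mentioned above):

    // Hypothetical usage inside v8::internal; all sizes are illustrative.
    Heap::ReserveSpace(64 * 1024,   // new space
                       32 * 1024,   // old pointer space
                       32 * 1024,   // old data space
                       16 * 1024,   // code space
                       8 * 1024,    // map space
                       4 * 1024,    // cell space
                       0);          // no large objects
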
+
//
// Support for the API.
//
@@ -820,9 +838,6 @@ class Heap : public AllStatic {
// Update the cache with a new number-string pair.
static void SetNumberStringCache(Object* number, String* str);
- // Entries in the cache. Must be a power of 2.
- static const int kNumberStringCacheSize = 64;
-
// Adjusts the amount of registered external memory.
// Returns the adjusted value.
static inline int AdjustAmountOfExternalAllocatedMemory(int change_in_bytes);
@@ -837,11 +852,15 @@ class Heap : public AllStatic {
> old_gen_promotion_limit_;
}
+ static intptr_t OldGenerationSpaceAvailable() {
+ return old_gen_allocation_limit_ -
+ (PromotedSpaceSize() + PromotedExternalMemorySize());
+ }
+
// True if we have reached the allocation limit in the old generation that
// should artificially cause a GC right now.
static bool OldGenerationAllocationLimitReached() {
- return (PromotedSpaceSize() + PromotedExternalMemorySize())
- > old_gen_allocation_limit_;
+ return OldGenerationSpaceAvailable() < 0;
}
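The refactoring turns the limit test into a sign check on a single signed quantity. For example, with a 512 MB limit, 500 MB of promoted space and 20 MB of external memory, the available space is 512 - (500 + 20) = -8 MB, so the limit is reached. The arithmetic, extracted into a standalone sketch:

    #include <cstdint>

    // Stand-ins for old_gen_allocation_limit_, PromotedSpaceSize() and
    // PromotedExternalMemorySize().
    intptr_t OldGenSpaceAvailable(intptr_t limit, intptr_t promoted,
                                  intptr_t external) {
      return limit - (promoted + external);
    }

    bool OldGenLimitReached(intptr_t limit, intptr_t promoted,
                            intptr_t external) {
      return OldGenSpaceAvailable(limit, promoted, external) < 0;
    }
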
// Can be called when the embedding application is idle.
@@ -890,11 +909,6 @@ class Heap : public AllStatic {
static int linear_allocation_scope_depth_;
static bool context_disposed_pending_;
- // The number of MapSpace pages is limited by the way we pack
- // Map pointers during GC.
- static const int kMaxMapSpaceSize =
- (1 << (MapWord::kMapPageIndexBits)) * Page::kPageSize;
-
#if defined(V8_TARGET_ARCH_X64)
static const int kMaxObjectSizeInNewSpace = 512*KB;
#else
@@ -1060,9 +1074,9 @@ class Heap : public AllStatic {
// Helper function used by CopyObject to copy a source object to an
// allocated target object and update the forwarding pointer in the source
// object. Returns the target object.
- static HeapObject* MigrateObject(HeapObject* source,
- HeapObject* target,
- int size);
+ static inline HeapObject* MigrateObject(HeapObject* source,
+ HeapObject* target,
+ int size);
// Helper function that governs the promotion policy from new space to
// old. If the object's old address lies below the new space's age
@@ -1078,9 +1092,6 @@ class Heap : public AllStatic {
static void ReportStatisticsAfterGC();
#endif
- // Update an old object's remembered set
- static int UpdateRSet(HeapObject* obj);
-
// Rebuild remembered set in an old space.
static void RebuildRSets(PagedSpace* space);
@@ -1103,6 +1114,12 @@ class Heap : public AllStatic {
SharedFunctionInfo* shared,
Object* prototype);
+
+ // Initializes the number to string cache based on the max semispace size.
+ static Object* InitializeNumberStringCache();
+ // Flush the number to string cache.
+ static void FlushNumberStringCache();
+
static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64;
@@ -1234,7 +1251,7 @@ class OldSpaces BASE_EMBEDDED {
// Space iterator for iterating over all the paged spaces of the heap:
-// Map space, old pointer space, old data space and code space.
+// Map space, old pointer space, old data space, code space and cell space.
// Returns each space in turn, and null when it is done.
class PagedSpaces BASE_EMBEDDED {
public:
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index a0236d026..0e9ffeaab 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -575,6 +575,7 @@ void Assembler::leave() {
void Assembler::mov_b(Register dst, const Operand& src) {
+ ASSERT(dst.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x8A);
@@ -592,6 +593,7 @@ void Assembler::mov_b(const Operand& dst, int8_t imm8) {
void Assembler::mov_b(const Operand& dst, Register src) {
+ ASSERT(src.code() < 4);
EnsureSpace ensure_space(this);
last_pc_ = pc_;
EMIT(0x88);
@@ -752,6 +754,14 @@ void Assembler::cmov(Condition cc, Register dst, const Operand& src) {
}
+void Assembler::rep_movs() {
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0xA5);
+}
+
+
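The two emitted bytes are the REP prefix (0xF3) followed by doubleword MOVS (0xA5): the CPU copies ECX doublewords from [ESI] to [EDI], advancing both pointers. A C-level equivalent of what the instruction does:

    #include <cstddef>
    #include <cstdint>

    // rep movsd: copy 'count' 32-bit units from src to dst. In hardware,
    // ECX holds the count, ESI the source and EDI the destination.
    void RepMovsEquivalent(uint32_t* dst, const uint32_t* src, size_t count) {
      for (size_t i = 0; i < count; i++) dst[i] = src[i];
    }
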
void Assembler::xchg(Register dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1199,6 +1209,15 @@ void Assembler::sub(Register dst, const Operand& src) {
}
+void Assembler::subb(Register dst, const Operand& src) {
+ ASSERT(dst.code() < 4);
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x2A);
+ emit_operand(dst, src);
+}
+
+
void Assembler::sub(const Operand& dst, Register src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1586,7 +1605,6 @@ void Assembler::j(Condition cc, Handle<Code> code, Hint hint) {
// FPU instructions
-
void Assembler::fld(int i) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -1675,6 +1693,15 @@ void Assembler::fisttp_s(const Operand& adr) {
}
+void Assembler::fisttp_d(const Operand& adr) {
+ ASSERT(CpuFeatures::IsEnabled(SSE3));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xDD);
+ emit_operand(ecx, adr);
+}
+
+
void Assembler::fist_s(const Operand& adr) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
@@ -2026,6 +2053,50 @@ void Assembler::comisd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::movdqa(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqa(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
+void Assembler::movdqu(const Operand& dst, XMMRegister src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ last_pc_ = pc_;
+ EMIT(0xF3);
+ EMIT(0x0F);
+ EMIT(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
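Both new pairs move a full 128-bit XMM register; the encodings above differ only in the prefix. The 0x66-prefixed movdqa requires 16-byte-aligned memory and faults otherwise, while the 0xF3-prefixed movdqu accepts any address. An intrinsic-level analogue (compiles anywhere SSE2 is available):

    #include <emmintrin.h>  // SSE2 intrinsics

    // movdqa: aligned 128-bit load/store; faults on a misaligned address.
    void Copy16Aligned(void* dst, const void* src) {
      __m128i v = _mm_load_si128(static_cast<const __m128i*>(src));
      _mm_store_si128(static_cast<__m128i*>(dst), v);
    }

    // movdqu: unaligned 128-bit load/store; works anywhere.
    void Copy16Unaligned(void* dst, const void* src) {
      __m128i v = _mm_loadu_si128(static_cast<const __m128i*>(src));
      _mm_storeu_si128(static_cast<__m128i*>(dst), v);
    }
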
void Assembler::movdbl(XMMRegister dst, const Operand& src) {
EnsureSpace ensure_space(this);
last_pc_ = pc_;
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index 21fa0d5a0..f35abd575 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -540,6 +540,9 @@ class Assembler : public Malloced {
void cmov(Condition cc, Register dst, Handle<Object> handle);
void cmov(Condition cc, Register dst, const Operand& src);
+ // Repetitive string instructions.
+ void rep_movs();
+
// Exchange two registers
void xchg(Register dst, Register src);
@@ -614,6 +617,7 @@ class Assembler : public Malloced {
void shr_cl(Register dst);
void subb(const Operand& dst, int8_t imm8);
+ void subb(Register dst, const Operand& src);
void sub(const Operand& dst, const Immediate& x);
void sub(Register dst, const Operand& src);
void sub(const Operand& dst, Register src);
@@ -693,6 +697,7 @@ class Assembler : public Malloced {
void fistp_d(const Operand& adr);
void fisttp_s(const Operand& adr);
+ void fisttp_d(const Operand& adr);
void fabs();
void fchs();
@@ -749,6 +754,11 @@ class Assembler : public Malloced {
void comisd(XMMRegister dst, XMMRegister src);
+ void movdqa(XMMRegister dst, const Operand& src);
+ void movdqa(const Operand& dst, XMMRegister src);
+ void movdqu(XMMRegister dst, const Operand& src);
+ void movdqu(const Operand& dst, XMMRegister src);
+
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
void movdbl(const Operand& dst, XMMRegister src);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index f4dd2f931..d56c02de1 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -86,7 +86,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
__ EnterConstructFrame();
// Store a smi-tagged arguments count on the stack.
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ push(eax);
// Push the function to invoke on the stack.
@@ -255,7 +255,7 @@ void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Retrieve smi-tagged arguments count from the stack.
__ mov(eax, Operand(esp, 0));
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
// Push the allocated receiver to the stack. We need two copies
// because we may have to return the original one and the calling
@@ -440,8 +440,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ EnterInternalFrame(); // preserves eax, ebx, edi
// Store the arguments count on the stack (smi tagged).
- ASSERT(kSmiTag == 0);
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ push(eax);
__ push(edi); // save edi across the call
@@ -452,7 +451,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Get the arguments count and untag it.
__ pop(eax);
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ LeaveInternalFrame();
__ jmp(&patch_receiver);
@@ -634,7 +633,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Invoke the function.
ParameterCount actual(eax);
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ mov(edi, Operand(ebp, 4 * kPointerSize));
__ InvokeFunction(edi, actual, CALL_FUNCTION);
@@ -831,7 +830,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// elements_array_end: start of next object
// array_size: size of array (smi)
ASSERT(kSmiTag == 0);
- __ shr(array_size, kSmiTagSize); // Convert from smi to value.
+ __ SmiUntag(array_size); // Convert from smi to value.
__ mov(FieldOperand(elements_array, JSObject::kMapOffset),
Factory::fixed_array_map());
Label not_empty_2, fill_array;
@@ -960,7 +959,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
// Handle construction of an array from a list of arguments.
__ bind(&argc_two_or_more);
ASSERT(kSmiTag == 0);
- __ shl(eax, kSmiTagSize); // Convet argc to a smi.
+ __ SmiTag(eax); // Convert argc to a smi.
// eax: array_size (smi)
// edi: constructor
// esp[0] : argc
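Every shl/sar/lea sequence replaced in this file implements the same 31-bit smi encoding: a smi is the integer value shifted left by kSmiTagSize (1) with a zero tag bit, so tagging is one left shift and untagging one arithmetic right shift. The arithmetic in a self-contained sketch:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;

    // Tag: value << 1, tag bit 0. Only values fitting in 31 bits are valid smis.
    int32_t SmiTag(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << kSmiTagSize);
    }

    // Untag: arithmetic right shift preserves the sign.
    int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

    int main() {
      assert(SmiUntag(SmiTag(-42)) == -42);
      assert((SmiTag(1234) & 1) == 0);  // tagged smis are even: kSmiTagMask tests
      return 0;
    }
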
diff --git a/deps/v8/src/ia32/codegen-ia32-inl.h b/deps/v8/src/ia32/codegen-ia32-inl.h
index 44e937af1..49c706d13 100644
--- a/deps/v8/src/ia32/codegen-ia32-inl.h
+++ b/deps/v8/src/ia32/codegen-ia32-inl.h
@@ -39,16 +39,6 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 249468658..993675ba6 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -32,7 +32,10 @@
#include "compiler.h"
#include "debug.h"
#include "ic-inl.h"
+#include "jsregexp.h"
#include "parser.h"
+#include "regexp-macro-assembler.h"
+#include "regexp-stack.h"
#include "register-allocator-inl.h"
#include "runtime.h"
#include "scopes.h"
@@ -248,6 +251,12 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
StoreArgumentsObject(true);
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -600,36 +609,33 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result);
}
- { Reference shadow_ref(this, scope_->arguments_shadow());
- Reference arguments_ref(this, scope_->arguments());
- ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result arguments = frame_->Pop();
- if (arguments.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !arguments.handle()->IsTheHole();
- } else {
- __ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
- arguments.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- arguments_ref.SetValue(NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has already
+ // been written to. This can happen if a function has a local
+ // variable named 'arguments'.
+ LoadFromSlot(arguments->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has
+ // been assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
+ probe.Unuse();
+ done.Branch(not_equal);
}
- shadow_ref.SetValue(NOT_CONST_INIT);
}
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
return frame_->Pop();
}
@@ -661,15 +667,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
Load(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
@@ -761,6 +759,11 @@ class FloatingPointHelper : public AllStatic {
static void CheckFloatOperands(MacroAssembler* masm,
Label* non_float,
Register scratch);
+ // Takes the operands in edx and eax and loads them as integers in eax
+ // and ecx.
+ static void LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* operand_conversion_failure);
// Test if operands are numbers (smi or HeapNumber objects), and load
// them into xmm0 and xmm1 if they are. Jump to label not_numbers if
// either operand is not a number. Operands are in edx and eax.
@@ -771,8 +774,8 @@ class FloatingPointHelper : public AllStatic {
const char* GenericBinaryOpStub::GetName() {
if (name_ != NULL) return name_;
- const int len = 100;
- name_ = Bootstrapper::AllocateAutoDeletedArray(len);
+ const int kMaxNameLength = 100;
+ name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* op_name = Token::Name(op_);
const char* overwrite_name;
@@ -783,7 +786,7 @@ const char* GenericBinaryOpStub::GetName() {
default: overwrite_name = "UnknownOverwrite"; break;
}
- OS::SNPrintF(Vector<char>(name_, len),
+ OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
"GenericBinaryOpStub_%s_%s%s_%s%s",
op_name,
overwrite_name,
@@ -838,9 +841,9 @@ void DeferredInlineBinaryOperation::Generate() {
__ jmp(&load_right);
__ bind(&left_smi);
- __ sar(left_, 1);
+ __ SmiUntag(left_);
__ cvtsi2sd(xmm0, Operand(left_));
- __ shl(left_, 1);
+ __ SmiTag(left_);
if (mode_ == OVERWRITE_LEFT) {
Label alloc_failure;
__ push(left_);
@@ -866,9 +869,9 @@ void DeferredInlineBinaryOperation::Generate() {
__ jmp(&do_op);
__ bind(&right_smi);
- __ sar(right_, 1);
+ __ SmiUntag(right_);
__ cvtsi2sd(xmm1, Operand(right_));
- __ shl(right_, 1);
+ __ SmiTag(right_);
if (mode_ == OVERWRITE_RIGHT || mode_ == NO_OVERWRITE) {
Label alloc_failure;
__ push(left_);
@@ -1211,8 +1214,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
__ test(edx, Operand(edx));
deferred->Branch(not_zero);
// Tag the result and store it in the quotient register.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -1272,8 +1274,8 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Untag both operands.
__ mov(answer.reg(), left->reg());
- __ sar(answer.reg(), kSmiTagSize);
- __ sar(ecx, kSmiTagSize);
+ __ SmiUntag(answer.reg());
+ __ SmiUntag(ecx);
// Perform the operation.
switch (op) {
case Token::SAR:
@@ -1295,8 +1297,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// in a case where it is dropped anyway.
__ test(answer.reg(), Immediate(0xc0000000));
__ j(zero, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(ecx, kSmiTagSize);
+ __ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break;
@@ -1307,8 +1308,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
// Check that the *signed* result fits in a smi.
__ cmp(answer.reg(), 0xc0000000);
__ j(positive, &result_ok);
- ASSERT(kSmiTag == 0);
- __ shl(ecx, kSmiTagSize);
+ __ SmiTag(ecx);
deferred->Jump();
__ bind(&result_ok);
break;
@@ -1317,9 +1317,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
UNREACHABLE();
}
// Smi-tag the result in answer.
- ASSERT(kSmiTagSize == 1); // Adjust code if not the case.
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ __ SmiTag(answer.reg());
deferred->BindExit();
left->Unuse();
right->Unuse();
@@ -1369,7 +1367,7 @@ void CodeGenerator::LikelySmiBinaryOperation(Token::Value op,
ASSERT(kSmiTag == 0); // Adjust code below if not the case.
// Remove smi tag from the left operand (but keep sign).
// Left-hand operand has been copied into answer.
- __ sar(answer.reg(), kSmiTagSize);
+ __ SmiUntag(answer.reg());
// Do multiplication of smis, leaving result in answer.
__ imul(answer.reg(), Operand(right->reg()));
// Go slow on overflows.
@@ -1716,7 +1714,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->Branch(not_zero);
__ mov(answer.reg(), operand->reg());
- __ sar(answer.reg(), kSmiTagSize);
+ __ SmiUntag(answer.reg());
__ shr(answer.reg(), shift_value);
// A negative Smi shifted right two is in the positive Smi range.
if (shift_value < 2) {
@@ -1724,9 +1722,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->Branch(not_zero);
}
operand->Unuse();
- ASSERT(kSmiTagSize == times_2); // Adjust the code if not true.
- __ lea(answer.reg(),
- Operand(answer.reg(), answer.reg(), times_1, kSmiTag));
+ __ SmiTag(answer.reg());
deferred->BindExit();
frame_->Push(&answer);
}
@@ -1734,9 +1730,42 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
case Token::SHL:
if (reversed) {
- Result constant_operand(value);
- LikelySmiBinaryOperation(op, &constant_operand, operand,
- overwrite_mode);
+ Result right;
+ Result right_copy_in_ecx;
+
+ // Make sure to get a copy of the right operand into ecx. This
+ // allows us to modify it without having to restore it in the
+ // deferred code.
+ operand->ToRegister();
+ if (operand->reg().is(ecx)) {
+ right = allocator()->Allocate();
+ __ mov(right.reg(), ecx);
+ frame_->Spill(ecx);
+ right_copy_in_ecx = *operand;
+ } else {
+ right_copy_in_ecx = allocator()->Allocate(ecx);
+ __ mov(ecx, operand->reg());
+ right = *operand;
+ }
+ operand->Unuse();
+
+ Result answer = allocator()->Allocate();
+ DeferredInlineSmiOperationReversed* deferred =
+ new DeferredInlineSmiOperationReversed(op,
+ answer.reg(),
+ smi_value,
+ right.reg(),
+ overwrite_mode);
+ __ mov(answer.reg(), Immediate(int_value));
+ __ sar(ecx, kSmiTagSize);
+ deferred->Branch(carry);
+ __ shl_cl(answer.reg());
+ __ cmp(answer.reg(), 0xc0000000);
+ deferred->Branch(sign);
+ __ SmiTag(answer.reg());
+
+ deferred->BindExit();
+ frame_->Push(&answer);
} else {
// Only the least significant 5 bits of the shift value are used.
// In the slow case, this masking is done inside the runtime call.
@@ -1865,6 +1894,13 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
}
+static bool CouldBeNaN(const Result& result) {
+ if (!result.is_constant()) return true;
+ if (!result.handle()->IsHeapNumber()) return false;
+ return isnan(HeapNumber::cast(*result.handle())->value());
+}
+
+
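The helper exists because every IEEE-754 comparison involving NaN is false, including NaN == NaN, so the compare stub needs its NaN handling only when both operands might be NaN; a provably non-NaN constant on either side lets Comparison pass kCantBothBeNaN below. The property being relied on:

    #include <cassert>
    #include <cmath>

    int main() {
      double nan = std::nan("");
      // Ordering and equality against NaN are always false...
      assert(!(nan < 1.0) && !(nan == 1.0) && !(nan >= 1.0));
      // ...including self-comparison, the classic isnan idiom.
      assert(nan != nan);
      return 0;
    }
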
void CodeGenerator::Comparison(AstNode* node,
Condition cc,
bool strict,
@@ -1885,15 +1921,28 @@ void CodeGenerator::Comparison(AstNode* node,
}
ASSERT(cc == less || cc == equal || cc == greater_equal);
- // If either side is a constant smi, optimize the comparison.
- bool left_side_constant_smi =
- left_side.is_constant() && left_side.handle()->IsSmi();
- bool right_side_constant_smi =
- right_side.is_constant() && right_side.handle()->IsSmi();
- bool left_side_constant_null =
- left_side.is_constant() && left_side.handle()->IsNull();
- bool right_side_constant_null =
- right_side.is_constant() && right_side.handle()->IsNull();
+ // If either side is a constant of some sort, we can probably optimize the
+ // comparison.
+ bool left_side_constant_smi = false;
+ bool left_side_constant_null = false;
+ bool left_side_constant_1_char_string = false;
+ if (left_side.is_constant()) {
+ left_side_constant_smi = left_side.handle()->IsSmi();
+ left_side_constant_null = left_side.handle()->IsNull();
+ left_side_constant_1_char_string =
+ (left_side.handle()->IsString() &&
+ (String::cast(*left_side.handle())->length() == 1));
+ }
+ bool right_side_constant_smi = false;
+ bool right_side_constant_null = false;
+ bool right_side_constant_1_char_string = false;
+ if (right_side.is_constant()) {
+ right_side_constant_smi = right_side.handle()->IsSmi();
+ right_side_constant_null = right_side.handle()->IsNull();
+ right_side_constant_1_char_string =
+ (right_side.handle()->IsString() &&
+ (String::cast(*right_side.handle())->length() == 1));
+ }
if (left_side_constant_smi || right_side_constant_smi) {
if (left_side_constant_smi && right_side_constant_smi) {
@@ -1982,7 +2031,7 @@ void CodeGenerator::Comparison(AstNode* node,
}
// Set up and call the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
Result result = frame_->CallStub(&stub, &left_side, &right_side);
result.ToRegister();
__ cmp(result.reg(), 0);
@@ -2041,18 +2090,153 @@ void CodeGenerator::Comparison(AstNode* node,
operand.Unuse();
dest->Split(not_zero);
}
+ } else if (left_side_constant_1_char_string ||
+ right_side_constant_1_char_string) {
+ if (left_side_constant_1_char_string && right_side_constant_1_char_string) {
+ // Trivial case, comparing two constants.
+ int left_value = String::cast(*left_side.handle())->Get(0);
+ int right_value = String::cast(*right_side.handle())->Get(0);
+ switch (cc) {
+ case less:
+ dest->Goto(left_value < right_value);
+ break;
+ case equal:
+ dest->Goto(left_value == right_value);
+ break;
+ case greater_equal:
+ dest->Goto(left_value >= right_value);
+ break;
+ default:
+ UNREACHABLE();
+ }
+ } else {
+ // Only one side is a constant 1 character string.
+ // If left side is a constant 1-character string, reverse the operands.
+ // Since one side is a constant string, conversion order does not matter.
+ if (left_side_constant_1_char_string) {
+ Result temp = left_side;
+ left_side = right_side;
+ right_side = temp;
+ cc = ReverseCondition(cc);
+ // This may reintroduce greater or less_equal as the value of cc.
+ // CompareStub and the inline code both support all values of cc.
+ }
+ // Implement comparison against a constant string, inlining the case
+ // where both sides are strings.
+ left_side.ToRegister();
+
+ // Here we split control flow to the stub call and inlined cases
+ // before finally splitting it to the control destination. We use
+ // a jump target and branching to duplicate the virtual frame at
+ // the first split. We manually handle the off-frame references
+ // by reconstituting them on the non-fall-through path.
+ JumpTarget is_not_string, is_string;
+ Register left_reg = left_side.reg();
+ Handle<Object> right_val = right_side.handle();
+ __ test(left_side.reg(), Immediate(kSmiTagMask));
+ is_not_string.Branch(zero, &left_side);
+ Result temp = allocator_->Allocate();
+ ASSERT(temp.is_valid());
+ __ mov(temp.reg(),
+ FieldOperand(left_side.reg(), HeapObject::kMapOffset));
+ __ movzx_b(temp.reg(),
+ FieldOperand(temp.reg(), Map::kInstanceTypeOffset));
+ // If we are testing for equality, make use of the symbol shortcut.
+ // Check if the left hand side has the same type as the right hand
+ // side (which is always a symbol).
+ if (cc == equal) {
+ Label not_a_symbol;
+ ASSERT(kSymbolTag != 0);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ __ test(temp.reg(), Immediate(kIsSymbolMask)); // Test the symbol bit.
+ __ j(zero, &not_a_symbol);
+ // They are symbols, so do identity compare.
+ __ cmp(left_side.reg(), right_side.handle());
+ dest->true_target()->Branch(equal);
+ dest->false_target()->Branch(not_equal);
+ __ bind(&not_a_symbol);
+ }
+ // If the receiver is not a string of the type we handle, call the stub.
+ __ and_(temp.reg(),
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(temp.reg(), kStringTag | kSeqStringTag | kAsciiStringTag);
+ temp.Unuse();
+ is_string.Branch(equal, &left_side);
+
+ // Set up and call the compare stub.
+ is_not_string.Bind(&left_side);
+ CompareStub stub(cc, strict, kCantBothBeNaN);
+ Result result = frame_->CallStub(&stub, &left_side, &right_side);
+ result.ToRegister();
+ __ cmp(result.reg(), 0);
+ result.Unuse();
+ dest->true_target()->Branch(cc);
+ dest->false_target()->Jump();
+
+ is_string.Bind(&left_side);
+ // Here we know we have a sequential ASCII string.
+ left_side = Result(left_reg);
+ right_side = Result(right_val);
+ Result temp2 = allocator_->Allocate();
+ ASSERT(temp2.is_valid());
+ // Test string equality and comparison.
+ if (cc == equal) {
+ Label comparison_done;
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(1));
+ __ j(not_equal, &comparison_done);
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmpb(FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize),
+ char_value);
+ __ bind(&comparison_done);
+ } else {
+ __ mov(temp2.reg(),
+ FieldOperand(left_side.reg(), String::kLengthOffset));
+ __ sub(Operand(temp2.reg()), Immediate(1));
+ Label comparison;
+ // If the length is 0 then our subtraction gave -1 which compares less
+ // than any character.
+ __ j(negative, &comparison);
+ // Otherwise load the first character.
+ __ movzx_b(temp2.reg(),
+ FieldOperand(left_side.reg(), SeqAsciiString::kHeaderSize));
+ __ bind(&comparison);
+ // Compare the first character of the string with our constant
+ // 1-character string.
+ uint8_t char_value =
+ static_cast<uint8_t>(String::cast(*right_side.handle())->Get(0));
+ __ cmp(Operand(temp2.reg()), Immediate(char_value));
+ Label characters_were_different;
+ __ j(not_equal, &characters_were_different);
+ // If the first character is the same then the long string sorts after
+ // the short one.
+ __ cmp(FieldOperand(left_side.reg(), String::kLengthOffset),
+ Immediate(1));
+ __ bind(&characters_were_different);
+ }
+ temp2.Unuse();
+ left_side.Unuse();
+ right_side.Unuse();
+ dest->Split(cc);
+ }
} else {
// Neither side is a constant Smi or null.
// If either side is a non-smi constant, skip the smi check.
bool known_non_smi =
(left_side.is_constant() && !left_side.handle()->IsSmi()) ||
(right_side.is_constant() && !right_side.handle()->IsSmi());
+ NaNInformation nan_info =
+ (CouldBeNaN(left_side) && CouldBeNaN(right_side)) ?
+ kBothCouldBeNaN :
+ kCantBothBeNaN;
left_side.ToRegister();
right_side.ToRegister();
if (known_non_smi) {
// When non-smi, call out to the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, nan_info);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
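The fast path added in this hunk reduces comparison against a constant one-character ASCII string to at most two inline checks: equality needs a length test plus one byte compare, and ordering compares first characters with ties broken by length (an empty string sorts before everything). The same logic in a standalone sketch:

    #include <string>

    // Compare s against the one-character string {c}: returns <0, 0 or >0.
    int CompareWithOneCharConstant(const std::string& s, char c) {
      if (s.empty()) return -1;                 // "" sorts before "c"
      if (s[0] != c) return s[0] < c ? -1 : 1;  // first characters differ
      return s.size() == 1 ? 0 : 1;             // tie: the longer string sorts after
    }
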
@@ -2079,7 +2263,7 @@ void CodeGenerator::Comparison(AstNode* node,
temp.Unuse();
is_smi.Branch(zero, taken);
// When non-smi, call out to the compare stub.
- CompareStub stub(cc, strict);
+ CompareStub stub(cc, strict, nan_info);
Result answer = frame_->CallStub(&stub, &left_side, &right_side);
if (cc == equal) {
__ test(answer.reg(), Operand(answer.reg()));
@@ -2239,7 +2423,7 @@ void CodeGenerator::CallApplyLazy(Property* apply,
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
- __ shr(eax, kSmiTagSize);
+ __ SmiUntag(eax);
__ mov(ecx, Operand(eax));
__ cmp(eax, kArgumentsLimit);
build_args.Branch(above);
@@ -3266,7 +3450,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
frame_->EmitPush(eax); // <- slot 3
frame_->EmitPush(edx); // <- slot 2
__ mov(eax, FieldOperand(edx, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
entry.Jump();
@@ -3278,7 +3462,7 @@ void CodeGenerator::VisitForInStatement(ForInStatement* node) {
// Push the length of the array and the initial index onto the stack.
__ mov(eax, FieldOperand(eax, FixedArray::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
frame_->EmitPush(eax); // <- slot 1
frame_->EmitPush(Immediate(Smi::FromInt(0))); // <- slot 0
@@ -3394,13 +3578,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->EmitPush(eax);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Load the exception to the top of the stack. Here we make use of the
- // convenient property that it doesn't matter whether a value is
- // immediately on top of or underneath a zero-sized reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -3721,21 +3901,21 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
ASSERT(boilerplate->IsBoilerplate());
+ // The inevitable call will sync frame elements to memory anyway, so
+ // we do it eagerly to allow us to push the arguments directly into
+ // place.
+ frame_->SyncRange(0, frame_->element_count() - 1);
+
// Use the fast case closure allocation code that allocates in new
// space for nested functions that don't need literals cloning.
if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
FastNewClosureStub stub;
- frame_->Push(boilerplate);
+ frame_->EmitPush(Immediate(boilerplate));
Result answer = frame_->CallStub(&stub, 1);
frame_->Push(&answer);
} else {
// Call the runtime to instantiate the function boilerplate
- // object. The inevitable call will sync frame elements to memory
- // anyway, so we do it eagerly to allow us to push the arguments
- // directly into place.
- frame_->SyncRange(0, frame_->element_count() - 1);
-
- // Create a new closure.
+ // object.
frame_->EmitPush(esi);
frame_->EmitPush(Immediate(boilerplate));
Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
@@ -4238,46 +4418,10 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
}
-// Materialize the object literal 'node' in the literals array
-// 'literals' of the function. Leave the object boilerplate in
-// 'boilerplate'.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- DeferredObjectLiteral(Register boilerplate,
- Register literals,
- ObjectLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // Constant properties (2).
- __ push(Immediate(node_->constant_properties()));
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -4287,32 +4431,18 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Load the literals array of the function.
__ mov(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredObjectLiteral* deferred =
- new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the boilerplate object.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant properties.
+ frame_->Push(node->constant_properties());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
for (int i = 0; i < node->properties()->length(); i++) {
@@ -4372,45 +4502,10 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
-// Materialize the array literal 'node' in the literals array 'literals'
-// of the function. Leave the array boilerplate in 'boilerplate'.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- DeferredArrayLiteral(Register boilerplate,
- Register literals,
- ArrayLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ push(Immediate(Smi::FromInt(node_->literal_index())));
- // Constant properties (2).
- __ push(Immediate(node_->literals()));
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- if (!boilerplate_.is(eax)) __ mov(boilerplate_, eax);
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -4421,36 +4516,19 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
__ mov(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ mov(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredArrayLiteral* deferred =
- new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
- __ cmp(boilerplate.reg(), Factory::undefined_value());
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the resulting array literal boilerplate on the stack.
- frame_->Push(&boilerplate);
-
- // Clone the boilerplate object.
+ frame_->Push(&literals);
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ frame_->Push(node->constant_elements());
int length = node->values()->length();
Result clone;
- if (node->depth() == 1 &&
- length <= FastCloneShallowArrayStub::kMaximumLength) {
- FastCloneShallowArrayStub stub(length);
- clone = frame_->CallStub(&stub, 1);
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else if (length > FastCloneShallowArrayStub::kMaximumLength) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
- clone = frame_->CallRuntime(Runtime::kCloneLiteralBoilerplate, 1);
+ FastCloneShallowArrayStub stub(length);
+ clone = frame_->CallStub(&stub, 3);
}
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
@@ -4651,22 +4729,19 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->Push(Factory::undefined_value());
}
+ // Push the receiver.
+ frame_->PushParameterAt(-1);
+
// Resolve the call.
Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
-
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ mov(scratch.reg(), FieldOperand(result.reg(), FixedArray::kHeaderSize));
- frame_->SetElementAt(arg_count + 1, &scratch);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ mov(result.reg(),
- FieldOperand(result.reg(), FixedArray::kHeaderSize + kPointerSize));
- frame_->SetElementAt(arg_count, &result);
+ // The runtime call returns a pair of values in eax (function) and
+ // edx (receiver). Touch up the stack with the right values.
+ Result receiver = allocator_->Allocate(edx);
+ frame_->SetElementAt(arg_count + 1, &result);
+ frame_->SetElementAt(arg_count, &receiver);
+ receiver.Unuse();
// Call the function.
CodeForSourcePosition(node->position());
@@ -4966,7 +5041,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
__ test(index.reg(), Immediate(kSmiTagMask | 0x80000000));
__ j(not_zero, &slow_case);
// Untag the index.
- __ sar(index.reg(), kSmiTagSize);
+ __ SmiUntag(index.reg());
__ bind(&try_again_with_new_string);
// Fetch the instance type of the receiver into ecx.
@@ -5009,8 +5084,7 @@ void CodeGenerator::GenerateFastCharCodeAt(ZoneList<Expression*>* args) {
times_1,
SeqAsciiString::kHeaderSize));
__ bind(&got_char_code);
- ASSERT(kSmiTag == 0);
- __ shl(temp.reg(), kSmiTagSize);
+ __ SmiTag(temp.reg());
__ jmp(&end);
// Handle non-flat strings.
@@ -5342,87 +5416,57 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- JumpTarget done;
- JumpTarget call_runtime;
- ASSERT(args->length() == 1);
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
- // Load number and duplicate it.
Load(args->at(0));
- frame_->Dup();
+ Load(args->at(1));
+
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
- // Get the number into an unaliased register and load it onto the
- // floating point stack still leaving one copy on the frame.
- Result number = frame_->Pop();
- number.ToRegister();
- frame_->Spill(number.reg());
- FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
- number.Unuse();
- // Perform the operation on the number.
- switch (op) {
- case SIN:
- __ fsin();
- break;
- case COS:
- __ fcos();
- break;
- }
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
- // Go slow case if argument to operation is out of range.
- Result eax_reg = allocator_->Allocate(eax);
- ASSERT(eax_reg.is_valid());
- __ fnstsw_ax();
- __ sahf();
- eax_reg.Unuse();
- call_runtime.Branch(parity_even, not_taken);
-
- // Allocate heap number for result if possible.
- Result scratch1 = allocator()->Allocate();
- Result scratch2 = allocator()->Allocate();
- Result heap_number = allocator()->Allocate();
- __ AllocateHeapNumber(heap_number.reg(),
- scratch1.reg(),
- scratch2.reg(),
- call_runtime.entry_label());
- scratch1.Unuse();
- scratch2.Unuse();
-
- // Store the result in the allocated heap number.
- __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
- // Replace the extra copy of the argument with the result.
- frame_->SetElementAt(0, &heap_number);
- done.Jump();
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
- call_runtime.Bind();
- // Free ST(0) which was not popped before calling into the runtime.
- __ ffree(0);
- Result answer;
- switch (op) {
- case SIN:
- answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
+ SubStringStub stub;
+ Result answer = frame_->CallStub(&stub, 3);
frame_->Push(&answer);
- done.Bind();
}
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
+ StringCompareStub stub;
Result answer = frame_->CallStub(&stub, 2);
frame_->Push(&answer);
}
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
+
+ // Load the arguments on the stack and call the stub.
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ RegExpExecStub stub;
+ Result result = frame_->CallStub(&stub, 4);
+ frame_->Push(&result);
+}
+
+
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@@ -5551,12 +5595,12 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
} else {
Load(node->expression());
+ bool overwrite =
+ (node->expression()->AsBinaryOperation() != NULL &&
+ node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
switch (op) {
case Token::SUB: {
- bool overwrite =
- (node->expression()->AsBinaryOperation() != NULL &&
- node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -5573,16 +5617,16 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
__ test(operand.reg(), Immediate(kSmiTagMask));
smi_label.Branch(zero, &operand, taken);
- frame_->Push(&operand); // undo popping of TOS
- Result answer = frame_->InvokeBuiltin(Builtins::BIT_NOT,
- CALL_FUNCTION, 1);
-
+ GenericUnaryOpStub stub(Token::BIT_NOT, overwrite);
+ Result answer = frame_->CallStub(&stub, &operand);
continue_label.Jump(&answer);
+
smi_label.Bind(&answer);
answer.ToRegister();
frame_->Spill(answer.reg());
__ not_(answer.reg());
__ and_(answer.reg(), ~kSmiTagMask); // Remove inverted smi-tag.
+
continue_label.Bind(&answer);
frame_->Push(&answer);
break;
@@ -5964,6 +6008,8 @@ void CodeGenerator::VisitThisFunction(ThisFunction* node) {
void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
Comment cmnt(masm_, "[ CompareOperation");
+ bool left_already_loaded = false;
+
// Get the expressions from the node.
Expression* left = node->left();
Expression* right = node->right();
@@ -6044,7 +6090,6 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
__ CmpInstanceType(answer.reg(), JS_REGEXP_TYPE);
answer.Unuse();
destination()->Split(equal);
-
} else if (check->Equals(Heap::object_symbol())) {
__ test(answer.reg(), Immediate(kSmiTagMask));
destination()->false_target()->Branch(zero);
@@ -6076,6 +6121,38 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
destination()->Goto(false);
}
return;
+ } else if (op == Token::LT &&
+ right->AsLiteral() != NULL &&
+ right->AsLiteral()->handle()->IsHeapNumber()) {
+ Handle<HeapNumber> check(HeapNumber::cast(*right->AsLiteral()->handle()));
+ if (check->value() == 2147483648.0) { // 0x80000000.
+ Load(left);
+ left_already_loaded = true;
+ Result lhs = frame_->Pop();
+ lhs.ToRegister();
+ __ test(lhs.reg(), Immediate(kSmiTagMask));
+ destination()->true_target()->Branch(zero); // All Smis are less.
+ Result scratch = allocator()->Allocate();
+ ASSERT(scratch.is_valid());
+ __ mov(scratch.reg(), FieldOperand(lhs.reg(), HeapObject::kMapOffset));
+ __ cmp(scratch.reg(), Factory::heap_number_map());
+ JumpTarget not_a_number;
+ not_a_number.Branch(not_equal, &lhs);
+ __ mov(scratch.reg(),
+ FieldOperand(lhs.reg(), HeapNumber::kExponentOffset));
+ __ cmp(Operand(scratch.reg()), Immediate(0xfff00000));
+ not_a_number.Branch(above_equal, &lhs); // It's a negative NaN or -Inf.
+ const uint32_t borderline_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch.reg()), Immediate(borderline_exponent));
+ scratch.Unuse();
+ lhs.Unuse();
+ destination()->true_target()->Branch(less);
+ destination()->false_target()->Jump();
+
+ not_a_number.Bind(&lhs);
+ frame_->Push(&lhs);
+ }
}
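The x < 0x80000000 fast path never inspects the mantissa: for doubles that are not negative NaN or -Infinity (filtered by the 0xfff00000 test), a signed compare of the high 32 bits against the encoding of 2^31 decides the question, because negative numbers have the sign bit set and read as negative integers. Assuming V8's kExponentBias of 1023 and kExponentShift of 20, the borderline is (1023 + 31) << 20 = 0x41E00000. A standalone check of the arithmetic (little-endian host assumed):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    const uint32_t kBorderline = (1023 + 31) << 20;  // 0x41E00000

    // High 32 bits of a double's IEEE-754 bit pattern.
    uint32_t HighWord(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return static_cast<uint32_t>(bits >> 32);
    }

    int main() {
      // 2^31 - 1 encodes below the borderline, 2^31 exactly at it.
      assert(static_cast<int32_t>(HighWord(2147483647.0)) <
             static_cast<int32_t>(kBorderline));
      assert(!(static_cast<int32_t>(HighWord(2147483648.0)) <
               static_cast<int32_t>(kBorderline)));
      // Negative doubles have the sign bit set and compare as negative ints.
      assert(static_cast<int32_t>(HighWord(-1.0)) <
             static_cast<int32_t>(kBorderline));
      return 0;
    }
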
Condition cc = no_condition;
@@ -6100,14 +6177,14 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
cc = greater_equal;
break;
case Token::IN: {
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
Result answer = frame_->InvokeBuiltin(Builtins::IN, CALL_FUNCTION, 2);
frame_->Push(&answer); // push the result
return;
}
case Token::INSTANCEOF: {
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
InstanceofStub stub;
Result answer = frame_->CallStub(&stub, 2);
@@ -6120,7 +6197,7 @@ void CodeGenerator::VisitCompareOperation(CompareOperation* node) {
default:
UNREACHABLE();
}
- Load(left);
+ if (!left_already_loaded) Load(left);
Load(right);
Comparison(node, cc, strict, destination());
}
@@ -6462,7 +6539,7 @@ void Reference::GetValue() {
// Shift the key to get the actual index value and check that
// it is within bounds.
__ mov(index.reg(), key.reg());
- __ sar(index.reg(), kSmiTagSize);
+ __ SmiUntag(index.reg());
__ cmp(index.reg(),
FieldOperand(elements.reg(), FixedArray::kLengthOffset));
deferred->Branch(above_equal);
@@ -6768,13 +6845,19 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
int elements_size = (length_ > 0) ? FixedArray::SizeFor(length_) : 0;
int size = JSArray::kSize + elements_size;
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoid multiple limit checks.
- Label gc;
- __ AllocateInNewSpace(size, eax, ebx, ecx, &gc, TAG_OBJECT);
+ // Load boilerplate object into ecx and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ mov(ecx, Operand(esp, 3 * kPointerSize));
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
+ __ mov(ecx, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
+ __ cmp(ecx, Factory::undefined_value());
+ __ j(equal, &slow_case);
- // Get the boilerplate from the stack.
- __ mov(ecx, Operand(esp, 1 * kPointerSize));
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size, eax, ebx, edx, &slow_case, TAG_OBJECT);
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
@@ -6798,12 +6881,12 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
}
}
- // Return and remove the on-stack parameter.
- __ ret(1 * kPointerSize);
+ // Return and remove the on-stack parameters.
+ __ ret(3 * kPointerSize);
- __ bind(&gc);
- ExternalReference runtime(Runtime::kCloneShallowLiteralBoilerplate);
- __ TailCallRuntime(runtime, 1, 1);
+ __ bind(&slow_case);
+ ExternalReference runtime(Runtime::kCreateArrayLiteralShallow);
+ __ TailCallRuntime(runtime, 3, 1);
}
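
The rewritten stub now receives the literals array and the (smi) literal index on the stack and looks the boilerplate up itself, falling back to Runtime::kCreateArrayLiteralShallow when the slot is still undefined. The `times_2` scale works only because the index is a smi; a sketch of the arithmetic the ASSERT above pins down:

    #include <cstdint>

    uint32_t FixedArrayElementOffset(uint32_t header_size, int32_t smi_index) {
      // smi_index == 2 * i, so scaling by 2 yields 4 * i == i * kPointerSize.
      return header_size + 2u * static_cast<uint32_t>(smi_index);
    }
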
@@ -7027,7 +7110,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// If the smi tag is 0 we can just leave the tag on one operand.
ASSERT(kSmiTag == 0); // adjust code below if not the case
// Remove tag from one of the operands (but keep sign).
- __ sar(eax, kSmiTagSize);
+ __ SmiUntag(eax);
// Do multiplication.
__ imul(eax, Operand(ebx)); // multiplication of smis; result in eax
// Go slow on overflows.
@@ -7051,8 +7134,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
__ test(edx, Operand(edx));
__ j(not_zero, slow);
// Tag the result and store it in register eax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
break;
case Token::MOD:
@@ -7082,8 +7164,8 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Move the second operand into register ecx.
__ mov(ecx, Operand(ebx));
// Remove tags from operands (but keep sign).
- __ sar(eax, kSmiTagSize);
- __ sar(ecx, kSmiTagSize);
+ __ SmiUntag(eax);
+ __ SmiUntag(ecx);
// Perform the operation.
switch (op_) {
case Token::SAR:
@@ -7111,8 +7193,7 @@ void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
UNREACHABLE();
}
// Tag the result and store it in register eax.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
break;
default:
@@ -7237,42 +7318,12 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SAR:
case Token::SHL:
case Token::SHR: {
- FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
- FloatingPointHelper::LoadFloatOperands(masm, ecx);
-
- Label skip_allocation, non_smi_result, operand_conversion_failure;
-
- // Reserve space for converted numbers.
- __ sub(Operand(esp), Immediate(2 * kPointerSize));
-
- if (use_sse3_) {
- // Truncate the operands to 32-bit integers and check for
- // exceptions in doing so.
- CpuFeatures::Scope scope(SSE3);
- __ fisttp_s(Operand(esp, 0 * kPointerSize));
- __ fisttp_s(Operand(esp, 1 * kPointerSize));
- __ fnstsw_ax();
- __ test(eax, Immediate(1));
- __ j(not_zero, &operand_conversion_failure);
- } else {
- // Check if right operand is int32.
- __ fist_s(Operand(esp, 0 * kPointerSize));
- __ fild_s(Operand(esp, 0 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
-
- // Check if left operand is int32.
- __ fist_s(Operand(esp, 1 * kPointerSize));
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ FCmp();
- __ j(not_zero, &operand_conversion_failure);
- __ j(parity_even, &operand_conversion_failure);
- }
-
- // Get int32 operands and perform bitop.
- __ pop(ecx);
- __ pop(eax);
+ Label non_smi_result, skip_allocation;
+ Label operand_conversion_failure;
+ FloatingPointHelper::LoadAsIntegers(
+ masm,
+ use_sse3_,
+ &operand_conversion_failure);
switch (op_) {
case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
@@ -7292,8 +7343,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
- ASSERT(kSmiTagSize == times_2); // adjust code if not the case
- __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
+ __ SmiTag(eax);
GenerateReturn(masm);
// All ops except SHR return a signed int32 that we load in a HeapNumber.
@@ -7318,28 +7368,20 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: UNREACHABLE();
}
// Store the result in the HeapNumber and return.
- __ mov(Operand(esp, 1 * kPointerSize), ebx);
- __ fild_s(Operand(esp, 1 * kPointerSize));
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ebx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ mov(Operand(esp, 1 * kPointerSize), ebx);
+ __ fild_s(Operand(esp, 1 * kPointerSize));
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
GenerateReturn(masm);
}
- // Clear the FPU exception flag and reset the stack before calling
- // the runtime system.
+ // Go to runtime for non-number inputs.
__ bind(&operand_conversion_failure);
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- if (use_sse3_) {
- // If we've used the SSE3 instructions for truncating the
- // floating point values to integers and it failed, we have a
- // pending #IA exception. Clear it.
- __ fnclex();
- } else {
- // The non-SSE3 variant does early bailout if the right
- // operand isn't a 32-bit integer, so we may have a single
- // value on the FPU stack we need to get rid of.
- __ ffree(0);
- }
-
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
@@ -7463,6 +7505,197 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
}
+// Get the integer part of a heap number. Surprisingly, all this bit twiddling
+// is faster than using the built-in instructions on floating point registers.
+// Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the
+// trashed registers.
+void IntegerConvert(MacroAssembler* masm,
+ Register source,
+ bool use_sse3,
+ Label* conversion_failure) {
+ Label done, right_exponent, normal_exponent;
+ Register scratch = ebx;
+ Register scratch2 = edi;
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(source, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ if (use_sse3) {
+ CpuFeatures::Scope scope(SSE3);
+ // Check whether the exponent is too big for a 64 bit signed integer.
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(kTooBigExponent));
+ __ j(greater_equal, conversion_failure);
+ // Load x87 register with heap number.
+ __ fld_d(FieldOperand(source, HeapNumber::kValueOffset));
+ // Reserve space for 64 bit answer.
+ __ sub(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ // Do conversion, which cannot fail because we checked the exponent.
+ __ fisttp_d(Operand(esp, 0));
+ __ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
+ __ add(Operand(esp), Immediate(sizeof(uint64_t))); // Nolint.
+ } else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, Operand(ecx));
+ // Check whether the exponent matches a 32 bit signed int that cannot be
+ // represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
+ // exponent is 30 (biased). This is the exponent that we are fastest at and
+ // also the highest exponent we can handle here.
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(non_smi_exponent));
+ // If we have a match of the int32-but-not-Smi exponent then skip some
+ // logic.
+ __ j(equal, &right_exponent);
+ // If the exponent is higher than that then go to slow case. This catches
+ // numbers that don't fit in a signed int32, infinities and NaNs.
+ __ j(less, &normal_exponent);
+
+ {
+ // Handle a big exponent. The only reason we have this code is that the
+ // >>> operator has a tendency to generate numbers with an exponent of 31.
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
+ __ cmp(Operand(scratch2), Immediate(big_non_smi_exponent));
+ __ j(not_equal, conversion_failure);
+ // We have the big exponent, typically from >>>. This means the number is
+ // in the range 2^31 to 2^32 - 1. Get the top bits of the mantissa.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch2, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We just or'ed in the implicit bit, so that took care of one, and
+ // we want to use the full unsigned range, so we subtract 1 bit from the
+ // shift distance.
+ const int big_shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch2, big_shift_distance);
+ // Get the second half of the double.
+ __ mov(ecx, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 21 bits to get the most significant 11 bits of the low
+ // mantissa word.
+ __ shr(ecx, 32 - big_shift_distance);
+ __ or_(ecx, Operand(scratch2));
+ // We have the answer in ecx, but we may need to negate it.
+ __ test(scratch, Operand(scratch));
+ __ j(positive, &done);
+ __ neg(ecx);
+ __ jmp(&done);
+ }
+
+ __ bind(&normal_exponent);
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+ // 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(Operand(scratch2), Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ mov(ecx, Immediate(30));
+ __ sub(ecx, Operand(scratch2));
+
+ __ bind(&right_exponent);
+ // Here ecx is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+ __ shl(scratch, shift_distance);
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ __ mov(scratch2, FieldOperand(source, HeapNumber::kMantissaOffset));
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch2, 32 - shift_distance);
+ __ or_(scratch2, Operand(scratch));
+ // Move down according to the exponent.
+ __ shr_cl(scratch2);
+ // Now the unsigned answer is in scratch2. We need to move it to ecx and
+ // we may need to fix the sign.
+ Label negative;
+ __ xor_(ecx, Operand(ecx));
+ __ cmp(ecx, FieldOperand(source, HeapNumber::kExponentOffset));
+ __ j(greater, &negative);
+ __ mov(ecx, scratch2);
+ __ jmp(&done);
+ __ bind(&negative);
+ __ sub(ecx, Operand(scratch2));
+ __ bind(&done);
+ }
+}
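
For readers who would rather not mentally execute the shifts, here is the non-SSE3 path of IntegerConvert as plain C++. A sketch only, assuming IEEE-754 doubles; it bails out where the stub jumps to conversion_failure (biased exponent above 1023 + 31) and, for values in [2^31, 2^32), returns the same ToInt32-style mod-2^32 result the big-exponent branch produces:

    #include <cstdint>
    #include <cstring>
    #include <optional>

    std::optional<int32_t> IntegerConvert(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);  // sign|exponent|mantissa
      uint32_t lo = static_cast<uint32_t>(bits);        // low mantissa word
      int exponent = static_cast<int>((hi >> 20) & 0x7ff) - 1023;
      if (exponent > 31) return std::nullopt;  // too big, Inf or NaN
      if (exponent < 0) return 0;              // |d| < 1 rounds to zero
      // 53 significant bits: the implicit 1, 20 from hi, 32 from lo.
      uint64_t frac =
          ((static_cast<uint64_t>(hi & 0xfffff) | 0x100000) << 32) | lo;
      uint32_t magnitude = static_cast<uint32_t>(frac >> (52 - exponent));
      if (hi & 0x80000000u) magnitude = 0u - magnitude;  // two's-complement
      return static_cast<int32_t>(magnitude);
    }
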
+
+
+// Input: edx, eax are the left and right objects of a bit op.
+// Output: eax, ecx are left and right integers for a bit op.
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm,
+ bool use_sse3,
+ Label* conversion_failure) {
+ // Check float operands.
+ Label arg1_is_object, check_undefined_arg1;
+ Label arg2_is_object, check_undefined_arg2;
+ Label load_arg2, done;
+
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg1_is_object);
+ __ SmiUntag(edx);
+ __ jmp(&load_arg2);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg1);
+ __ cmp(edx, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(edx, Immediate(0));
+ __ jmp(&load_arg2);
+
+ __ bind(&arg1_is_object);
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg1);
+ // Get the untagged integer version of the edx heap number in ecx.
+ IntegerConvert(masm, edx, use_sse3, conversion_failure);
+ __ mov(edx, ecx);
+
+ // Here edx has the untagged integer, eax has a Smi or a heap number.
+ __ bind(&load_arg2);
+ // Test if arg2 is a Smi.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &arg2_is_object);
+ __ SmiUntag(eax);
+ __ mov(ecx, eax);
+ __ jmp(&done);
+
+ // If the argument is undefined it converts to zero (ECMA-262, section 9.5).
+ __ bind(&check_undefined_arg2);
+ __ cmp(eax, Factory::undefined_value());
+ __ j(not_equal, conversion_failure);
+ __ mov(ecx, Immediate(0));
+ __ jmp(&done);
+
+ __ bind(&arg2_is_object);
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(ebx, Factory::heap_number_map());
+ __ j(not_equal, &check_undefined_arg2);
+ // Get the untagged integer version of the eax heap number in ecx.
+ IntegerConvert(masm, eax, use_sse3, conversion_failure);
+ __ bind(&done);
+ __ mov(eax, edx);
+}
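
The per-operand policy of LoadAsIntegers, restated in C++ (a sketch; TaggedValue is a hypothetical stand-in for the tagged values in edx/eax, and IntegerConvert is the helper sketched above):

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> IntegerConvert(double d);  // sketched above

    struct TaggedValue {
      enum Kind { kSmi, kUndefined, kHeapNumber, kOther } kind;
      int32_t smi;     // untagged value, valid when kind == kSmi
      double number;   // valid when kind == kHeapNumber
    };

    std::optional<int32_t> LoadAsInteger(const TaggedValue& v) {
      switch (v.kind) {
        case TaggedValue::kSmi:        return v.smi;        // SmiUntag
        case TaggedValue::kUndefined:  return 0;            // ECMA-262, 9.5
        case TaggedValue::kHeapNumber: return IntegerConvert(v.number);
        default:                       return std::nullopt; // conversion_failure
      }
    }
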
+
+
void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
Register number) {
Label load_smi, done;
@@ -7473,7 +7706,7 @@ void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
__ jmp(&done);
__ bind(&load_smi);
- __ sar(number, kSmiTagSize);
+ __ SmiUntag(number);
__ push(number);
__ fild_s(Operand(esp, 0));
__ pop(number);
@@ -7499,14 +7732,14 @@ void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
__ j(equal, &load_float_eax);
__ jmp(not_numbers); // Argument in eax is not a number.
__ bind(&load_smi_edx);
- __ sar(edx, 1); // Untag smi before converting to float.
+ __ SmiUntag(edx); // Untag smi before converting to float.
__ cvtsi2sd(xmm0, Operand(edx));
- __ shl(edx, 1); // Retag smi for heap number overwriting test.
+ __ SmiTag(edx); // Retag smi for heap number overwriting test.
__ jmp(&load_eax);
__ bind(&load_smi_eax);
- __ sar(eax, 1); // Untag smi before converting to float.
+ __ SmiUntag(eax); // Untag smi before converting to float.
__ cvtsi2sd(xmm1, Operand(eax));
- __ shl(eax, 1); // Retag smi for heap number overwriting test.
+ __ SmiTag(eax); // Retag smi for heap number overwriting test.
__ jmp(&done);
__ bind(&load_float_eax);
__ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
@@ -7530,14 +7763,14 @@ void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm,
__ jmp(&done);
__ bind(&load_smi_1);
- __ sar(scratch, kSmiTagSize);
+ __ SmiUntag(scratch);
__ push(scratch);
__ fild_s(Operand(esp, 0));
__ pop(scratch);
__ jmp(&done_load_1);
__ bind(&load_smi_2);
- __ sar(scratch, kSmiTagSize);
+ __ SmiUntag(scratch);
__ push(scratch);
__ fild_s(Operand(esp, 0));
__ pop(scratch);
@@ -7570,86 +7803,142 @@ void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm,
}
-void UnarySubStub::Generate(MacroAssembler* masm) {
- Label undo;
- Label slow;
- Label done;
- Label try_float;
-
- // Check whether the value is a smi.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(not_zero, &try_float, not_taken);
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ Label slow, done;
- // Enter runtime system if the value of the expression is zero
- // to make sure that we switch between 0 and -0.
- __ test(eax, Operand(eax));
- __ j(zero, &slow, not_taken);
-
- // The value of the expression is a smi that is not zero. Try
- // optimistic subtraction '0 - value'.
- __ mov(edx, Operand(eax));
- __ Set(eax, Immediate(0));
- __ sub(eax, Operand(edx));
- __ j(overflow, &undo, not_taken);
+ if (op_ == Token::SUB) {
+ // Check whether the value is a smi.
+ Label try_float;
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(not_zero, &try_float, not_taken);
- // If result is a smi we are done.
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done, taken);
+ // Go slow case if the value of the expression is zero
+ // to make sure that we switch between 0 and -0.
+ __ test(eax, Operand(eax));
+ __ j(zero, &slow, not_taken);
- // Restore eax and enter runtime system.
- __ bind(&undo);
- __ mov(eax, Operand(edx));
+ // The value of the expression is a smi that is not zero. Try
+ // optimistic subtraction '0 - value'.
+ Label undo;
+ __ mov(edx, Operand(eax));
+ __ Set(eax, Immediate(0));
+ __ sub(eax, Operand(edx));
+ __ j(overflow, &undo, not_taken);
- // Enter runtime system.
- __ bind(&slow);
- __ pop(ecx); // pop return address
- __ push(eax);
- __ push(ecx); // push return address
- __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ // If result is a smi we are done.
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &done, taken);
+
+ // Restore eax and go slow case.
+ __ bind(&undo);
+ __ mov(eax, Operand(edx));
+ __ jmp(&slow);
+
+ // Try floating point case.
+ __ bind(&try_float);
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow);
+ if (overwrite_) {
+ __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
+ __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ } else {
+ __ mov(edx, Operand(eax));
+ // edx: operand
+ __ AllocateHeapNumber(eax, ebx, ecx, &undo);
+ // eax: allocated 'empty' number
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
+ __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
+ __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
+ __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ }
+ } else if (op_ == Token::BIT_NOT) {
+ // Check if the operand is a heap number.
+ __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ cmp(edx, Factory::heap_number_map());
+ __ j(not_equal, &slow, not_taken);
+
+ // Convert the heap number in eax to an untagged integer in ecx.
+ IntegerConvert(masm, eax, CpuFeatures::IsSupported(SSE3), &slow);
+
+ // Do the bitwise operation and check if the result fits in a smi.
+ Label try_float;
+ __ not_(ecx);
+ __ cmp(ecx, 0xc0000000);
+ __ j(sign, &try_float, not_taken);
+
+ // Tag the result as a smi and we're done.
+ ASSERT(kSmiTagSize == 1);
+ __ lea(eax, Operand(ecx, times_2, kSmiTag));
+ __ jmp(&done);
- // Try floating point case.
- __ bind(&try_float);
- __ mov(edx, FieldOperand(eax, HeapObject::kMapOffset));
- __ cmp(edx, Factory::heap_number_map());
- __ j(not_equal, &slow);
- if (overwrite_) {
- __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
- __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
+ // Try to store the result in a heap number.
+ __ bind(&try_float);
+ if (!overwrite_) {
+ // Allocate a fresh heap number, but don't overwrite eax until
+ // we're sure we can do it without going through the slow case
+ // that needs the value in eax.
+ __ AllocateHeapNumber(ebx, edx, edi, &slow);
+ __ mov(eax, Operand(ebx));
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ __ cvtsi2sd(xmm0, Operand(ecx));
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+ } else {
+ __ push(ecx);
+ __ fild_s(Operand(esp, 0));
+ __ pop(ecx);
+ __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+ }
} else {
- __ mov(edx, Operand(eax));
- // edx: operand
- __ AllocateHeapNumber(eax, ebx, ecx, &undo);
- // eax: allocated 'empty' number
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
- __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
- __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
+ UNIMPLEMENTED();
}
+ // Return from the stub.
__ bind(&done);
-
__ StubReturn(1);
+
+ // Handle the slow case by jumping to the JavaScript builtin.
+ __ bind(&slow);
+ __ pop(ecx); // pop return address.
+ __ push(eax);
+ __ push(ecx); // push return address
+ switch (op_) {
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION);
+ break;
+ case Token::BIT_NOT:
+ __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
}
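
The SUB path negates a heap number without touching the FPU: flipping HeapNumber::kSignMask in the exponent word is a complete IEEE-754 negation. The equivalent operation on a raw double (a sketch):

    #include <cstdint>
    #include <cstring>

    double NegateByBitFlip(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      bits ^= 0x8000000000000000ull;  // kSignMask lives in the exponent word
      std::memcpy(&x, &bits, sizeof(x));
      return x;  // correct for 0/-0, infinities and NaNs alike
    }
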
void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Check if the calling frame is an arguments adaptor frame.
- Label adaptor;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(Operand(ecx), Immediate(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ j(equal, &adaptor);
-
- // Nothing to do: The formal number of parameters has already been
- // passed in register eax by calling function. Just return it.
- __ ret(0);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it.
- __ bind(&adaptor);
- __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Otherwise nothing to do: The number of formal parameters has already been
+ // passed in register eax by the calling function. Just return it.
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(equal, eax,
+ Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ } else {
+ Label exit;
+ __ j(not_equal, &exit);
+ __ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ __ bind(&exit);
+ }
__ ret(0);
}
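
The rewrite folds the stub's two exits into a single return: where CMOV is available, the whole thing is a branchless select. In C++ terms (a sketch with illustrative names):

    int ReadArgumentsLength(bool is_adaptor_frame,
                            int formal_parameter_count,  // already in eax
                            int adaptor_frame_length) {
      return is_adaptor_frame ? adaptor_frame_length : formal_parameter_count;
    }
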
@@ -7778,7 +8067,7 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
// Get the parameters pointer from the stack and untag the length.
__ mov(edx, Operand(esp, 2 * kPointerSize));
- __ sar(ecx, kSmiTagSize);
+ __ SmiUntag(ecx);
// Setup the elements pointer in the allocated arguments object and
// initialize the header in the elements fixed array.
@@ -7809,6 +8098,278 @@ void ArgumentsAccessStub::GenerateNewObject(MacroAssembler* masm) {
}
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+ // Just jump directly to runtime if regexp entry in generated code is turned
+ // off.
+ if (!FLAG_regexp_entry_native) {
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+ return;
+ }
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: last_match_info (expected JSArray)
+ // esp[8]: previous index
+ // esp[12]: subject string
+ // esp[16]: JSRegExp object
+
+ Label runtime;
+
+ // Check that the first argument is a JSRegExp object.
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
+ ASSERT_EQ(0, kSmiTag);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_REGEXP_TYPE, ecx);
+ __ j(not_equal, &runtime);
+ // Check that the RegExp has been compiled (data contains a fixed array).
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+#ifdef DEBUG
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Check(not_zero, "Unexpected type for RegExp data, FixedArray expected");
+ __ CmpObjectType(ecx, FIXED_ARRAY_TYPE, ebx);
+ __ Check(equal, "Unexpected type for RegExp data, FixedArray expected");
+#endif
+
+ // ecx: RegExp data (FixedArray)
+ // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+ __ mov(ebx, FieldOperand(ecx, JSRegExp::kDataTagOffset));
+ __ cmp(Operand(ebx), Immediate(Smi::FromInt(JSRegExp::IRREGEXP)));
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the number of captures fits in the static offsets vector buffer.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2. This
+ // uses the assumption that smis are 2 * their untagged value.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+ // Check that the static offsets vector buffer is large enough.
+ __ cmp(edx, OffsetsVector::kStaticOffsetsVectorSize);
+ __ j(above, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the second argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+ // Get the length of the string to ebx.
+ __ mov(ebx, FieldOperand(eax, String::kLengthOffset));
+
+ // ebx: Length of subject string
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the third argument is a positive smi.
+ __ mov(eax, Operand(esp, 2 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask | 0x80000000));
+ __ j(not_zero, &runtime);
+ // Check that it is not greater than the subject string length.
+ __ SmiUntag(eax);
+ __ cmp(eax, Operand(ebx));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // edx: Number of capture registers
+ // Check that the fourth object is a JSArray object.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ __ CmpObjectType(eax, JS_ARRAY_TYPE, ebx);
+ __ j(not_equal, &runtime);
+ // Check that the JSArray is in fast case.
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+ __ mov(eax, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(eax, Factory::fixed_array_map());
+ __ j(not_equal, &runtime);
+ // Check that the last match info has space for the capture registers and the
+ // additional information.
+ __ mov(eax, FieldOperand(ebx, FixedArray::kLengthOffset));
+ __ add(Operand(edx), Immediate(RegExpImpl::kLastMatchOverhead));
+ __ cmp(edx, Operand(eax));
+ __ j(greater, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Check the representation and encoding of the subject string (only support
+ // flat ascii strings).
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &runtime);
+
+ // ecx: RegExp data (FixedArray)
+ // Ensure that a RegExp stack is allocated.
+ ExternalReference address_of_regexp_stack_memory_address =
+ ExternalReference::address_of_regexp_stack_memory_address();
+ ExternalReference address_of_regexp_stack_memory_size =
+ ExternalReference::address_of_regexp_stack_memory_size();
+ __ mov(eax, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ test(eax, Operand(eax));
+ __ j(zero, &runtime, not_taken);
+
+ // ecx: RegExp data (FixedArray)
+ // Check that the irregexp code has been generated for an ascii string. If
+ // it has, the field contains a code object; otherwise it contains the hole.
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+ __ CmpObjectType(edx, CODE_TYPE, ebx);
+ __ j(not_equal, &runtime);
+
+ // Load used arguments before starting to push arguments for call to native
+ // RegExp code to avoid handling changing stack height.
+ __ mov(eax, Operand(esp, 3 * kPointerSize)); // Subject string.
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // Previous index.
+ __ mov(ecx, Operand(esp, 4 * kPointerSize)); // JSRegExp object.
+ __ SmiUntag(ebx); // Previous index from smi.
+
+ // eax: subject string
+ // ebx: previous index
+ // edx: code
+ // All checks done. Now push arguments for native regexp code.
+ __ IncrementCounter(&Counters::regexp_entry_native, 1);
+
+ // Argument 8: Indicate that this is a direct call from JavaScript.
+ __ push(Immediate(1));
+
+ // Argument 7: Start (high end) of backtracking stack memory area.
+ __ mov(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_address));
+ __ add(ecx, Operand::StaticVariable(address_of_regexp_stack_memory_size));
+ __ push(ecx);
+
+ // Argument 6: At start of string?
+ __ xor_(Operand(ecx), ecx); // setcc only operates on cl (lower byte of ecx).
+ __ test(ebx, Operand(ebx));
+ __ setcc(zero, ecx); // 1 if 0 (start of string), 0 if positive.
+ __ push(ecx);
+
+ // Argument 5: static offsets vector buffer.
+ __ push(Immediate(ExternalReference::address_of_static_offsets_vector()));
+
+ // Argument 4: End of string data.
+ __ mov(ecx, FieldOperand(eax, String::kLengthOffset));
+ __ add(ecx, Operand(eax));
+ __ add(Operand(ecx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ecx);
+
+ // Argument 3: Start of string data.
+ __ mov(ecx, ebx);
+ __ add(ebx, Operand(eax)); // String is ASCII.
+ __ add(Operand(ebx), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ push(ebx);
+
+ // Argument 2: Previous index.
+ __ push(ecx);
+
+ // Argument 1: Subject string.
+ __ push(eax);
+
+ // Locate the code entry and call it.
+ __ add(Operand(edx), Immediate(Code::kHeaderSize - kHeapObjectTag));
+ __ call(Operand(edx));
+ // Remove arguments.
+ __ add(Operand(esp), Immediate(8 * kPointerSize));
+
+ // Check the result.
+ Label success;
+ __ cmp(eax, NativeRegExpMacroAssembler::SUCCESS);
+ __ j(equal, &success, taken);
+ Label failure;
+ __ cmp(eax, NativeRegExpMacroAssembler::FAILURE);
+ __ j(equal, &failure, taken);
+ __ cmp(eax, NativeRegExpMacroAssembler::EXCEPTION);
+ // If not an exception it can only be retry. Handle that in the runtime system.
+ __ j(not_equal, &runtime);
+ // The result must now be exception. If there is no pending exception already,
+ // a stack overflow (on the backtrack stack) was detected in the RegExp code
+ // but the exception has not been created yet. Handle that in the runtime
+ // system.
+ ExternalReference pending_exception(Top::k_pending_exception_address);
+ __ mov(eax,
+ Operand::StaticVariable(ExternalReference::the_hole_value_location()));
+ __ cmp(eax, Operand::StaticVariable(pending_exception));
+ __ j(equal, &runtime);
+ __ bind(&failure);
+ // For failure and exception return null.
+ __ mov(Operand(eax), Factory::null_value());
+ __ ret(4 * kPointerSize);
+
+ // Load RegExp data.
+ __ bind(&success);
+ __ mov(eax, Operand(esp, 4 * kPointerSize));
+ __ mov(ecx, FieldOperand(eax, JSRegExp::kDataOffset));
+ __ mov(edx, FieldOperand(ecx, JSRegExp::kIrregexpCaptureCountOffset));
+ // Calculate number of capture registers (number_of_captures + 1) * 2.
+ __ add(Operand(edx), Immediate(2)); // edx was a smi.
+
+ // edx: Number of capture registers
+ // Load last_match_info which is still known to be a fast case JSArray.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ mov(ebx, FieldOperand(eax, JSArray::kElementsOffset));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // edx: number of capture registers
+ // Store the capture count.
+ __ SmiTag(edx); // Number of capture registers to smi.
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastCaptureCountOffset), edx);
+ __ SmiUntag(edx); // Number of capture registers back from smi.
+ // Store last subject and last input.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastSubjectOffset, eax, edi);
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
+ __ mov(ecx, ebx);
+ __ RecordWrite(ecx, RegExpImpl::kLastInputOffset, eax, edi);
+
+ // Get the static offsets vector filled by the native regexp code.
+ ExternalReference address_of_static_offsets_vector =
+ ExternalReference::address_of_static_offsets_vector();
+ __ mov(ecx, Immediate(address_of_static_offsets_vector));
+
+ // ebx: last_match_info backing store (FixedArray)
+ // ecx: offsets vector
+ // edx: number of capture registers
+ Label next_capture, done;
+ __ mov(eax, Operand(esp, 2 * kPointerSize)); // Read previous index.
+ // Capture register counter starts from number of capture registers and
+ // counts down until wrapping after zero.
+ __ bind(&next_capture);
+ __ sub(Operand(edx), Immediate(1));
+ __ j(negative, &done);
+ // Read the value from the static offsets vector buffer.
+ __ mov(edi, Operand(ecx, edx, times_pointer_size, 0));
+ // Smi-tag the offset with an explicit shift.
+ ASSERT_EQ(0, kSmiTag);
+ __ shl(edi, kSmiTagSize);
+ // Add previous index (from its stack slot) if value is not negative.
+ Label capture_negative;
+ // Carry flag set by shift above.
+ __ j(negative, &capture_negative, not_taken);
+ __ add(edi, Operand(eax)); // Add previous index (adding smi to smi).
+ __ bind(&capture_negative);
+ // Store the smi value in the last match info.
+ __ mov(FieldOperand(ebx,
+ edx,
+ times_pointer_size,
+ RegExpImpl::kFirstCaptureOffset),
+ edi);
+ __ jmp(&next_capture);
+ __ bind(&done);
+
+ // Return last match info.
+ __ mov(eax, Operand(esp, 1 * kPointerSize));
+ __ ret(4 * kPointerSize);
+
+ // Do the runtime call to execute the regexp.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kRegExpExec), 4, 1);
+}
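
The capture fix-up loop at the end is easy to misread in assembly, so here it is as C++ (a sketch; parameter names are illustrative). Each native offset is smi-tagged with an explicit shift and, when non-negative, rebased by the previous index — adding two smis yields a smi:

    #include <cstdint>

    void StoreCaptures(int32_t* last_match_info,  // kFirstCaptureOffset slots
                       const int32_t* offsets,    // static offsets vector
                       int capture_register_count,
                       int32_t previous_index_smi) {
      for (int i = capture_register_count - 1; i >= 0; i--) {
        int32_t smi = offsets[i] * 2;             // explicit SmiTag (shl)
        if (smi >= 0) smi += previous_index_smi;  // -1 (no capture) stays put
        last_match_info[i] = smi;
      }
    }
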
+
+
void CompareStub::Generate(MacroAssembler* masm) {
Label call_builtin, done;
@@ -7826,35 +8387,41 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
- Label return_equal;
- Label heap_number;
- // If it's not a heap number, then return equal.
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(Factory::heap_number_map()));
- __ j(equal, &heap_number);
- __ bind(&return_equal);
- __ Set(eax, Immediate(0));
- __ ret(0);
+ if (never_nan_nan_) {
+ __ Set(eax, Immediate(0));
+ __ ret(0);
+ } else {
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(Factory::heap_number_map()));
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ xor_(eax, Operand(eax));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
- __ add(edx, Operand(edx));
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- __ setcc(above_equal, eax);
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ xor_(eax, Operand(eax));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, Operand(edx));
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ }
__ bind(&not_identical);
}
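
When the operands are identical the only way equality can fail is a NaN, and the stub now only pays for that check when never_nan_nan_ is off. The test: double the exponent word to drop the sign bit, then compare against the quiet-NaN pattern. A sketch on raw doubles (0x7ff80000 — exponent all ones plus mantissa bit 51 — is my reading of the kQuietNaNHighBitsMask comment above, not a constant taken from the source):

    #include <cstdint>
    #include <cstring>

    bool IsQuietNaN(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      // Doubling discards the sign; >= admits any extra payload bits.
      return (hi << 1) >= (0x7ff80000u << 1);
    }
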
@@ -8001,9 +8568,10 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
+ Label check_for_strings;
if (cc_ == equal) {
- BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
- BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
+ BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax already holds a
@@ -8011,6 +8579,44 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ ret(2 * kPointerSize);
}
+ __ bind(&check_for_strings);
+
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &call_builtin);
+
+ // Load instance type for both objects.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ ASSERT(kNotStringTag != 0);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ecx, kFlatAsciiString);
+ __ cmp(ecx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &call_builtin);
+
+ // Inline comparison of ascii strings.
+ StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
+ edx,
+ eax,
+ ecx,
+ ebx,
+ edi);
+#ifdef DEBUG
+ __ Abort("Unexpected fall-through from string comparison");
+#endif
+
__ bind(&call_builtin);
// must swap argument order
__ pop(ecx);
@@ -8579,10 +9185,55 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
}
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case less: return "CompareStub_LT";
+ case greater: return "CompareStub_GT";
+ case less_equal: return "CompareStub_LE";
+ case greater_equal: return "CompareStub_GE";
+ case not_equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
+}
+
+
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
- return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
}
@@ -8778,12 +9429,12 @@ void StringAddStub::Generate(MacroAssembler* masm) {
}
-void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
- Register dest,
- Register src,
- Register count,
- Register scratch,
- bool ascii) {
+void StringStubBase::GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
Label loop;
__ bind(&loop);
// This loop just copies one character at a time, as it is only used for very
@@ -8804,6 +9455,316 @@ void StringAddStub::GenerateCopyCharacters(MacroAssembler* masm,
}
+void StringStubBase::GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii) {
+ // Copy characters using rep movs of doublewords. Copy the remaining 0-3
+ // bytes one at a time after running rep movs.
+ ASSERT(dest.is(edi)); // rep movs destination
+ ASSERT(src.is(esi)); // rep movs source
+ ASSERT(count.is(ecx)); // rep movs count
+ ASSERT(!scratch.is(dest));
+ ASSERT(!scratch.is(src));
+ ASSERT(!scratch.is(count));
+
+ // Nothing to do for zero characters.
+ Label done;
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Make count the number of bytes to copy.
+ if (!ascii) {
+ __ shl(count, 1);
+ }
+
+ // Don't enter the rep movs if there are less than 4 bytes to copy.
+ Label last_bytes;
+ __ test(count, Immediate(~3));
+ __ j(zero, &last_bytes);
+
+ // Copy from esi to edi using the rep movs instruction.
+ __ mov(scratch, count);
+ __ sar(count, 2); // Number of doublewords to copy.
+ __ rep_movs();
+
+ // Find number of bytes left.
+ __ mov(count, scratch);
+ __ and_(count, 3);
+
+ // Check if there are more bytes to copy.
+ __ bind(&last_bytes);
+ __ test(count, Operand(count));
+ __ j(zero, &done);
+
+ // Copy remaining characters.
+ Label loop;
+ __ bind(&loop);
+ __ mov_b(scratch, Operand(src, 0));
+ __ mov_b(Operand(dest, 0), scratch);
+ __ add(Operand(src), Immediate(1));
+ __ add(Operand(dest), Immediate(1));
+ __ sub(Operand(count), Immediate(1));
+ __ j(not_zero, &loop);
+
+ __ bind(&done);
+}
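
The strategy of GenerateCopyCharactersREP restated in C++ (a sketch): bulk-copy whole doublewords, then finish the byte tail. Like the stub, it assumes the regions do not overlap:

    #include <cstdint>
    #include <cstring>

    void CopyCharactersREP(uint8_t* dest, const uint8_t* src,
                           size_t char_count, bool ascii) {
      size_t bytes = ascii ? char_count : char_count * 2;  // shl(count, 1)
      size_t dword_bytes = bytes & ~static_cast<size_t>(3);
      std::memcpy(dest, src, dword_bytes);                 // the rep movsd part
      for (size_t i = dword_bytes; i < bytes; i++) {       // 0-3 byte tail loop
        dest[i] = src[i];
      }
    }
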
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: to
+ // esp[8]: from
+ // esp[12]: string
+
+ // Make sure first argument is a string.
+ __ mov(eax, Operand(esp, 3 * kPointerSize));
+ ASSERT_EQ(0, kSmiTag);
+ __ test(eax, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+ Condition is_string = masm->IsObjectStringType(eax, ebx, ebx);
+ __ j(NegateCondition(is_string), &runtime);
+
+ // eax: string
+ // ebx: instance type
+ // Calculate length of sub string using the smi values.
+ __ mov(ecx, Operand(esp, 1 * kPointerSize)); // to
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // from
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(not_zero, &runtime);
+ __ sub(ecx, Operand(edx));
+ // Handle sub-strings of length 2 or less in the runtime system.
+ __ SmiUntag(ecx); // Result length is no longer smi.
+ __ cmp(ecx, 2);
+ __ j(below_equal, &runtime);
+
+ // eax: string
+ // ebx: instance type
+ // ecx: result string length
+ // Check for flat ascii string
+ Label non_ascii_flat;
+ __ and_(ebx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ebx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+
+ // Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ SmiUntag(ebx);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ __ bind(&non_ascii_flat);
+ // eax: string
+ // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
+ // ecx: result string length
+ // Check for flat two byte string
+ __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
+ __ j(not_equal, &runtime);
+
+ // Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+
+ // eax: result string
+ // ecx: result string length
+ __ mov(edx, esi); // esi used by following code.
+ // Locate first character of result.
+ __ mov(edi, eax);
+ __ add(Operand(edi),
+ Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ // Load string argument and locate character of sub string start.
+ __ mov(esi, Operand(esp, 3 * kPointerSize));
+ __ add(Operand(esi), Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ // As from is a smi it is already 2 times the untagged value, which matches
+ // the byte size of a two byte character.
+ ASSERT_EQ(0, kSmiTag);
+ ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+ __ add(esi, Operand(ebx));
+
+ // eax: result string
+ // ecx: result length
+ // edx: original value of esi
+ // edi: first character of result
+ // esi: character of sub string start
+ GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
+ __ mov(esi, edx); // Restore esi.
+ __ IncrementCounter(&Counters::sub_string_native, 1);
+ __ ret(3 * kPointerSize);
+
+ // Just jump to runtime to create the sub string.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kSubString), 3, 1);
+}
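
One subtlety in the two-byte path: unlike the ascii path it never untags `from`. That works because a smi is already twice the index, which on a two-byte string is exactly the byte offset. A sketch:

    #include <cstdint>

    const uint16_t* TwoByteCharAddress(const uint8_t* chars_base,
                                       int32_t from_smi) {
      // from_smi == 2 * index == index * sizeof(uint16_t): no SmiUntag needed.
      return reinterpret_cast<const uint16_t*>(chars_base + from_smi);
    }
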
+
+
+void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3) {
+ Label compare_lengths, compare_lengths_1;
+
+ // Find minimum length. If either length is zero just compare lengths.
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ test(scratch1, Operand(scratch1));
+ __ j(zero, &compare_lengths_1);
+ __ mov(scratch2, FieldOperand(right, String::kLengthOffset));
+ __ test(scratch2, Operand(scratch2));
+ __ j(zero, &compare_lengths_1);
+ __ cmp(scratch1, Operand(scratch2));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(greater, scratch1, Operand(scratch2));
+ } else {
+ Label l;
+ __ j(less, &l);
+ __ mov(scratch1, scratch2);
+ __ bind(&l);
+ }
+
+ Label result_greater, result_less;
+ Label loop;
+ // Compare next character.
+ __ mov(scratch3, Immediate(-1)); // Index into strings.
+ __ bind(&loop);
+ // Compare characters.
+ Label character_compare_done;
+ __ add(Operand(scratch3), Immediate(1));
+ __ mov_b(scratch2, Operand(left,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ subb(scratch2, Operand(right,
+ scratch3,
+ times_1,
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
+ __ j(not_equal, &character_compare_done);
+ __ sub(Operand(scratch1), Immediate(1));
+ __ j(not_zero, &loop);
+ // If the first min-length characters match, compare the lengths; otherwise
+ // the last character comparison gives the result.
+ __ bind(&character_compare_done);
+ __ j(equal, &compare_lengths);
+ __ j(less, &result_less);
+ __ jmp(&result_greater);
+
+ // Compare lengths.
+ Label result_not_equal;
+ __ bind(&compare_lengths);
+ __ mov(scratch1, FieldOperand(left, String::kLengthOffset));
+ __ bind(&compare_lengths_1);
+ __ sub(scratch1, FieldOperand(right, String::kLengthOffset));
+ __ j(not_zero, &result_not_equal);
+
+ // Result is EQUAL.
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(eax, Operand(eax));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+ __ bind(&result_not_equal);
+ __ j(greater, &result_greater);
+
+ // Result is LESS.
+ __ bind(&result_less);
+ __ mov(eax, Immediate(Smi::FromInt(LESS)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ // Result is GREATER.
+ __ bind(&result_greater);
+ __ mov(eax, Immediate(Smi::FromInt(GREATER)->value()));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+}
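
What the generated comparison computes, in C++ (a sketch; the return values mirror the LESS/EQUAL/GREATER smis, and unsigned byte compares are safe because both strings are known to be ascii):

    int CompareFlatAsciiStrings(const unsigned char* left, int left_length,
                                const unsigned char* right, int right_length) {
      int min_length = left_length < right_length ? left_length : right_length;
      for (int i = 0; i < min_length; i++) {             // the subb loop
        if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
      }
      // All min_length characters matched: the shorter string is smaller.
      if (left_length == right_length) return 0;         // EQUAL
      return left_length < right_length ? -1 : 1;        // LESS / GREATER
    }
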
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+ Label runtime;
+
+ // Stack frame on entry.
+ // esp[0]: return address
+ // esp[4]: right string
+ // esp[8]: left string
+
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // left
+ __ mov(eax, Operand(esp, 1 * kPointerSize)); // right
+
+ Label not_same;
+ __ cmp(edx, Operand(eax));
+ __ j(not_equal, &not_same);
+ ASSERT_EQ(0, EQUAL);
+ ASSERT_EQ(0, kSmiTag);
+ __ xor_(eax, Operand(eax));
+ __ IncrementCounter(&Counters::string_compare_native, 1);
+ __ ret(2 * kPointerSize);
+
+ __ bind(&not_same);
+
+ // Check that both objects are not smis.
+ ASSERT_EQ(0, kSmiTag);
+ __ mov(ebx, Operand(edx));
+ __ and_(ebx, Operand(eax));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &runtime);
+
+ // Load instance type for both strings.
+ __ mov(ecx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+
+ // Check that both are flat ascii strings.
+ Label non_ascii_flat;
+ __ and_(ecx, kStringRepresentationMask | kStringEncodingMask);
+ __ cmp(ecx, kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+ const int kFlatAsciiString =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
+ __ and_(ebx, kFlatAsciiString);
+ __ cmp(ebx, kStringTag | kSeqStringTag | kAsciiStringTag);
+ __ j(not_equal, &non_ascii_flat);
+
+ // Compare flat ascii strings.
+ GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+
+ __ bind(&non_ascii_flat);
+
+ // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+ // tagged as a small integer.
+ __ bind(&runtime);
+ __ TailCallRuntime(ExternalReference(Runtime::kStringCompare), 2, 1);
+}
+
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 3d17c96ad..000222ff1 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -541,15 +541,18 @@ class CodeGenerator: public AstVisitor {
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
@@ -750,7 +753,32 @@ enum StringAddFlags {
};
-class StringAddStub: public CodeStub {
+class StringStubBase: public CodeStub {
+ public:
+ // Generate code for copying characters using a simple loop. This should only
+ // be used in places where the number of characters is small and the
+ // additional setup and checking in GenerateCopyCharactersREP adds too much
+ // overhead. Copying of overlapping regions is not supported.
+ void GenerateCopyCharacters(MacroAssembler* masm,
+ Register dest,
+ Register src,
+ Register count,
+ Register scratch,
+ bool ascii);
+
+ // Generate code for copying characters using the rep movs instruction.
+ // Copies ecx characters from esi to edi. Copying of overlapping regions is
+ // not supported.
+ void GenerateCopyCharactersREP(MacroAssembler* masm,
+ Register dest, // Must be edi.
+ Register src, // Must be esi.
+ Register count, // Must be ecx.
+ Register scratch, // Neither of the above.
+ bool ascii);
+};
+
+
+class StringAddStub: public StringStubBase {
public:
explicit StringAddStub(StringAddFlags flags) {
string_check_ = ((flags & NO_STRING_CHECK_IN_STUB) == 0);
@@ -762,18 +790,45 @@ class StringAddStub: public CodeStub {
void Generate(MacroAssembler* masm);
- void GenerateCopyCharacters(MacroAssembler* masm,
- Register desc,
- Register src,
- Register count,
- Register scratch,
- bool ascii);
-
// Should the stub check whether arguments are strings?
bool string_check_;
};
+class SubStringStub: public StringStubBase {
+ public:
+ SubStringStub() {}
+
+ private:
+ Major MajorKey() { return SubString; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
+class StringCompareStub: public StringStubBase {
+ public:
+ explicit StringCompareStub() {
+ }
+
+ // Compares two flat ascii strings and returns the result in eax after
+ // popping two arguments from the stack.
+ static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
+ Register left,
+ Register right,
+ Register scratch1,
+ Register scratch2,
+ Register scratch3);
+
+ private:
+ Major MajorKey() { return StringCompare; }
+ int MinorKey() { return 0; }
+
+ void Generate(MacroAssembler* masm);
+};
+
+
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 375cbdf7e..581cdc075 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -61,6 +61,7 @@ static ByteMnemonic two_operands_instr[] = {
{0x0B, "or", REG_OPER_OP_ORDER},
{0x1B, "sbb", REG_OPER_OP_ORDER},
{0x29, "sub", OPER_REG_OP_ORDER},
+ {0x2A, "subb", REG_OPER_OP_ORDER},
{0x2B, "sub", REG_OPER_OP_ORDER},
{0x85, "test", REG_OPER_OP_ORDER},
{0x31, "xor", OPER_REG_OP_ORDER},
@@ -1007,7 +1008,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
case 0x80:
{ data++;
- AppendToBuffer("%s ", "cmpb");
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ const char* mnem = NULL;
+ switch (regop) {
+ case 5: mnem = "subb"; break;
+ case 7: mnem = "cmpb"; break;
+ default: UnimplementedInstruction();
+ }
+ AppendToBuffer("%s ", mnem);
data += PrintRightOperand(data);
int32_t imm = *data;
AppendToBuffer(",0x%x", imm);
@@ -1057,6 +1067,19 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
+ } else if (*data == 0x6F) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqa %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*data == 0x7F) {
+ AppendToBuffer("movdqa ");
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
} else {
UnimplementedInstruction();
}
@@ -1093,6 +1116,11 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += 2;
break;
+ case 0x2C:
+ AppendToBuffer("subb eax,0x%x", *reinterpret_cast<uint8_t*>(data+1));
+ data += 2;
+ break;
+
case 0xA9:
AppendToBuffer("test eax,0x%x", *reinterpret_cast<int32_t*>(data+1));
data += 5;
@@ -1163,9 +1191,29 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0xF3:
- if (*(data+1) == 0x0F && *(data+2) == 0x2C) {
- data += 3;
- data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ if (*(data+1) == 0x0F) {
+ if (*(data+2) == 0x2C) {
+ data += 3;
+ data += PrintOperands("cvttss2si", REG_OPER_OP_ORDER, data);
+ } else if (*(data+2) == 0x6F) {
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ AppendToBuffer("movdqu %s,", NameOfXMMRegister(regop));
+ data += PrintRightOperand(data);
+ } else if (*(data+2) == 0x7F) {
+ AppendToBuffer("movdqu ");
+ data += 3;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ data += PrintRightOperand(data);
+ AppendToBuffer(",%s", NameOfXMMRegister(regop));
+ } else {
+ UnimplementedInstruction();
+ }
+ } else if (*(data+1) == 0xA5) {
+ data += 2;
+ AppendToBuffer("rep_movs");
} else {
UnimplementedInstruction();
}
@@ -1185,6 +1233,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
}
int instr_len = data - instr;
+ if (instr_len == 0) {
+ printf("%02x", *data);
+ }
ASSERT(instr_len > 0); // Ensure progress.
int outp = 0;
diff --git a/deps/v8/src/ia32/fast-codegen-ia32.cc b/deps/v8/src/ia32/fast-codegen-ia32.cc
index 46524d7dc..fdab58579 100644
--- a/deps/v8/src/ia32/fast-codegen-ia32.cc
+++ b/deps/v8/src/ia32/fast-codegen-ia32.cc
@@ -194,182 +194,118 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context,
+ Slot* slot,
+ Register scratch) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
- case Expression::kValue:
- __ push(source);
+ case Expression::kValue: {
+ MemOperand location = EmitSlotSearch(slot, scratch);
+ __ push(location);
break;
+ }
case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ Move(scratch, slot);
+ Apply(context, scratch);
break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
- }
}
}
-template <>
-Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
- Register scratch) {
- switch (source->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(ebp, SlotOffset(source));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
- break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
- UNREACHABLE();
- return Operand(eax, 0); // Dead code to make the compiler happy.
- }
-}
-
-
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- Operand location = CreateSlotOperand<Operand>(source, dst);
- __ mov(dst, location);
-}
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
- case Expression::kValue: {
- Operand location = CreateSlotOperand<Operand>(source, scratch);
- __ push(location);
+ case Expression::kValue:
+ __ push(Immediate(lit->handle()));
break;
- }
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
+ __ mov(eax, lit->handle());
+ Apply(context, eax);
break;
}
}
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ __ Drop(1);
break;
case Expression::kValue:
- __ push(Immediate(expr->handle()));
break;
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
- case Expression::kTestValue:
- __ mov(eax, expr->handle());
- Move(context, eax);
- break;
- }
-}
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ mov(Operand(ebp, SlotOffset(dst)), src);
+ case Expression::kTest:
+ __ pop(eax);
+ TestAndBranch(eax, true_label_, false_label_);
break;
- case Slot::CONTEXT: {
- ASSERT(!src.is(scratch1));
- ASSERT(!src.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- __ mov(Operand(scratch1, Context::SlotOffset(dst->index())), src);
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
+ case Expression::kValueTest: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, true_label_, &discard);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
break;
}
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ case Expression::kTestValue: {
+ Label discard;
+ __ mov(eax, Operand(esp, 0));
+ TestAndBranch(eax, &discard, false_label_);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ }
}
}
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int count) {
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
ASSERT(count > 0);
+ ASSERT(!reg.is(esp));
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- __ add(Operand(esp), Immediate(count * kPointerSize));
+ __ Drop(count);
break;
case Expression::kValue:
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
break;
case Expression::kTest:
- ASSERT(!source.is(esp));
- __ add(Operand(esp), Immediate(count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
+ __ Drop(count);
+ TestAndBranch(reg, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
- TestAndBranch(source, true_label_, &discard);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+ TestAndBranch(reg, true_label_, &discard);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
- if (count > 1) {
- __ add(Operand(esp), Immediate((count - 1) * kPointerSize));
- }
- __ mov(Operand(esp, 0), source);
- TestAndBranch(source, &discard, false_label_);
+ if (count > 1) __ Drop(count - 1);
+ __ mov(Operand(esp, 0), reg);
+ TestAndBranch(reg, &discard, false_label_);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -377,6 +313,47 @@ void FastCodeGenerator::DropAndMove(Expression::Context context,
}
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(ebp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(eax, 0);
+}
+
+
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ mov(destination, location);
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ mov(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ }
+}
+
+
void FastCodeGenerator::TestAndBranch(Register source,
Label* true_label,
Label* false_label) {
@@ -416,18 +393,21 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
- __ mov(Operand(ebp, SlotOffset(var->slot())),
+ __ mov(Operand(ebp, SlotOffset(slot)),
Immediate(Factory::the_hole_value()));
} else if (decl->fun() != NULL) {
Visit(decl->fun());
- __ pop(Operand(ebp, SlotOffset(var->slot())));
+ __ pop(Operand(ebp, SlotOffset(slot)));
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -499,7 +479,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
// Value in eax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
+ __ Drop(2);
}
}
}
@@ -529,7 +509,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ push(esi);
__ push(Immediate(boilerplate));
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
}
@@ -556,13 +536,13 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// Remember that the assembler may choose to do peephole optimization
// (eg, push/pop elimination).
__ nop();
- DropAndMove(context, eax);
+ DropAndApply(1, context, eax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
switch (slot->type()) {
- case Slot::LOCAL:
- case Slot::PARAMETER: {
+ case Slot::PARAMETER:
+ case Slot::LOCAL: {
Comment cmnt(masm_, "Stack slot");
break;
}
@@ -573,46 +553,45 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(context, slot, eax);
+ Apply(context, slot, eax);
} else {
- Comment cmnt(masm_, "Variable rewritten to Property");
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
+ // Assert that the object is in a slot.
Variable* object_var = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object_var);
Slot* object_slot = object_var->slot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
- Move(Expression::kValue, object_slot, eax);
+ MemOperand object_loc = EmitSlotSearch(object_slot, eax);
+ __ push(object_loc);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- Move(Expression::kValue, key_literal);
+ __ push(Immediate(key_literal->handle()));
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test eax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
+ // Notice: We must not have a "test eax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
__ nop();
// Drop key and object left on the stack by IC.
- DropAndMove(context, eax, 2);
+ DropAndApply(2, context, eax);
}
}
@@ -640,7 +619,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
}
@@ -672,7 +651,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL: // Fall through.
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
@@ -686,7 +665,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ mov(eax, Operand(esp, 0)); // Restore result into eax.
break;
}
- // fall through
+ // Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(eax);
Visit(key);
@@ -696,7 +675,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kSetProperty, 3);
__ mov(eax, Operand(esp, 0)); // Restore result into eax.
break;
- case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(eax);
Visit(key);
@@ -716,7 +695,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(eax);
@@ -730,7 +709,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(eax);
TestAndBranch(eax, true_label_, &discard);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -739,7 +718,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(eax);
TestAndBranch(eax, &discard, false_label_);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -752,7 +731,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(expr->literals()));
+ __ push(Immediate(expr->constant_elements()));
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
@@ -795,7 +774,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ add(Operand(esp), Immediate(kPointerSize));
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(eax);
@@ -809,7 +788,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(eax);
TestAndBranch(eax, true_label_, &discard);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -818,7 +797,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(eax);
TestAndBranch(eax, &discard, false_label_);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -828,18 +807,21 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
+ SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ mov(ecx, Immediate(key->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- Move(context, eax);
+ Apply(context, eax);
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
+ Expression::Context context) {
+ SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- Move(context, eax);
+ Apply(context, eax);
}
@@ -849,12 +831,12 @@ void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
- Move(context, eax);
+ Apply(context, eax);
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
@@ -867,15 +849,15 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// Overwrite the receiver on the stack with the result if needed.
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, context, eax);
} else if (var->slot() != NULL) {
Slot* slot = var->slot();
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER: {
- Operand target = Operand(ebp, SlotOffset(var->slot()));
- switch (expr->context()) {
+ Operand target = Operand(ebp, SlotOffset(slot));
+ switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
@@ -899,7 +881,7 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
__ mov(target, eax);
TestAndBranch(eax, true_label_, &discard);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -909,7 +891,7 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
__ mov(target, eax);
TestAndBranch(eax, &discard, false_label_);
__ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -918,41 +900,20 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
}
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the context chain to the context containing the slot.
- __ mov(eax,
- Operand(esi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ mov(eax,
- Operand(eax, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ mov(eax, FieldOperand(eax, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ mov(eax, esi); // RecordWrite destroys the object register.
- }
- if (FLAG_debug_code) {
- __ cmp(eax,
- Operand(eax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ Check(equal, "Context Slot chain length wrong.");
- }
- __ pop(ecx);
- __ mov(Operand(eax, Context::SlotOffset(slot->index())), ecx);
+ MemOperand target = EmitSlotSearch(slot, ecx);
+ __ pop(eax);
+ __ mov(target, eax);
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(ecx);
- } else if (expr->context() != Expression::kEffect) {
- __ mov(edx, ecx);
+ if (context == Expression::kValue) {
+ __ push(eax);
+ } else if (context != Expression::kEffect) {
+ __ mov(edx, eax);
}
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(eax, offset, ecx, ebx);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), edx);
+ __ RecordWrite(ecx, offset, eax, ebx);
+ if (context != Expression::kEffect && context != Expression::kValue) {
+ Apply(context, edx);
}
break;
}
@@ -961,6 +922,10 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -992,7 +957,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(eax);
}
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, expr->context(), eax);
}
@@ -1025,34 +990,32 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Receiver and key are still on stack.
- __ add(Operand(esp), Immediate(2 * kPointerSize));
- Move(expr->context(), eax);
+ DropAndApply(2, expr->context(), eax);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
// Record the source position for the property load.
SetSourcePosition(expr->position());
- // Evaluate receiver.
+ // Evaluate the receiver.
Visit(expr->obj());
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in ecx and the receiver on the stack.
+ if (key->IsPropertyName()) {
+ // Do a named property load. The IC expects the property name in ecx
+ // and the receiver on the stack.
__ mov(ecx, Immediate(key->AsLiteral()->handle()));
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
// By emitting a nop we make sure that we do not have a test eax
// instruction after the call; it is treated specially by the LoadIC code.
__ nop();
+ DropAndApply(1, expr->context(), eax);
} else {
- // Do a KEYED property load.
+ // Do a keyed property load.
Visit(expr->key());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
@@ -1060,9 +1023,8 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
// instruction after the call; it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
- __ add(Operand(esp), Immediate(kPointerSize));
+ DropAndApply(2, expr->context(), eax);
}
- DropAndMove(expr->context(), eax);
}
@@ -1084,7 +1046,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
__ call(ic, mode);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
}
@@ -1101,7 +1063,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
__ CallStub(&stub);
// Restore context register.
__ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, expr->context(), eax);
}
@@ -1142,14 +1104,15 @@ void FastCodeGenerator::VisitCall(Call* expr) {
// instruction after the call; it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
- __ add(Operand(esp), Immediate(kPointerSize));
+ __ Drop(1);
// Pop receiver.
__ pop(ebx);
// Push result (function).
__ push(eax);
// Push receiver object on stack.
if (prop->is_synthetic()) {
- __ push(CodeGenerator::GlobalObject());
+ __ mov(ecx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
} else {
__ push(ebx);
}
@@ -1210,7 +1173,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
__ call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in eax, or pop it.
- DropAndMove(expr->context(), eax);
+ DropAndApply(1, expr->context(), eax);
}
@@ -1243,7 +1206,7 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
}
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
}
@@ -1266,7 +1229,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Value is false so it's needed.
__ push(Immediate(Factory::undefined_value()));
// Fall through.
- case Expression::kTest: // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1278,20 +1241,19 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
ASSERT_EQ(Expression::kTest, expr->expression()->context());
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ Label push_true, push_false, done;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
+ case Expression::kEffect:
+ VisitForControl(expr->expression(), &done, &done);
+ __ bind(&done);
+ break;
+
case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, &push_true);
__ bind(&push_true);
__ push(Immediate(Factory::true_value()));
__ jmp(&done);
@@ -1300,39 +1262,24 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
__ bind(&done);
break;
- case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
- __ bind(&done);
- break;
-
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, true_label_);
break;
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, &push_true);
__ bind(&push_true);
__ push(Immediate(Factory::true_value()));
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, true_label_);
__ bind(&push_false);
__ push(Immediate(Factory::false_value()));
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
break;
}
@@ -1365,7 +1312,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
break;
}
@@ -1377,27 +1324,75 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
- Visit(proxy);
+ // Expression can only be a property, a global or a (parameter or local)
+ // slot. Variables rewritten to .arguments are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+ // In case of a property we use the uninitialized expression context
+ // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type = (prop->key()->context() == Expression::kUninitialized)
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && expr->context() != Expression::kEffect) {
+ ASSERT(expr->context() != Expression::kUninitialized);
+ __ push(Immediate(Smi::FromInt(0)));
+ }
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop, Expression::kValue);
+ } else {
+ Visit(prop->key());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ EmitKeyedPropertyLoad(prop, Expression::kValue);
+ }
+ }
+
+ // Convert to number.
__ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(eax);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue:
+ case Expression::kTest:
+ case Expression::kTestValue:
+ case Expression::kValueTest:
+ // Save the result on the stack. If we have a named or keyed property
+ // we store the result under the receiver that is currently on top
+ // of the stack.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ break;
+ case NAMED_PROPERTY:
+ __ mov(Operand(esp, kPointerSize), eax);
+ break;
+ case KEYED_PROPERTY:
+ __ mov(Operand(esp, 2 * kPointerSize), eax);
+ break;
+ }
+ break;
+ }
}
+
// Call runtime for +1/-1.
__ push(eax);
__ push(Immediate(Smi::FromInt(1)));
@@ -1406,42 +1401,55 @@ void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
__ CallRuntime(Runtime::kNumberSub, 2);
}
- // Call Store IC.
- __ mov(ecx, proxy->AsVariable()->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC.
- __ add(Operand(esp), Immediate(kPointerSize));
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
- break;
- case Expression::kTest:
- __ pop(eax);
- TestAndBranch(eax, true_label_, false_label_);
+ // Store the value returned in eax.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(eax);
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+ // For all contexts except kEffect: We have the result on
+ // top of the stack.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ expr->context());
+ }
break;
- case Expression::kValueTest: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, true_label_, &discard);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(false_label_);
+ case NAMED_PROPERTY: {
+ __ mov(ecx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(1, expr->context(), eax);
+ }
break;
}
- case Expression::kTestValue: {
- Label discard;
- __ mov(eax, Operand(esp, 0));
- TestAndBranch(eax, &discard, false_label_);
- __ bind(&discard);
- __ add(Operand(esp), Immediate(kPointerSize));
- __ jmp(true_label_);
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(2, expr->context(), eax);
+ }
break;
}
}
@@ -1483,7 +1491,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
break;
}
@@ -1500,46 +1508,40 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label push_true, push_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
- case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
- break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
+ break;
+ case Expression::kValue:
+ if_true = &push_true;
+ if_false = &push_false;
break;
-
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &push_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &push_false;
break;
}
- // Convert current context to test context: End pre-test code.
switch (expr->op()) {
case Token::IN: {
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ cmp(eax, Factory::true_value());
- __ j(equal, true_label_);
- __ jmp(false_label_);
+ __ j(equal, if_true);
+ __ jmp(if_false);
break;
}
@@ -1547,8 +1549,8 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(zero, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
@@ -1600,24 +1602,29 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ test(ecx, Immediate(kSmiTagMask));
__ j(not_zero, &slow_case, not_taken);
__ cmp(edx, Operand(eax));
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ test(eax, Operand(eax));
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
case Expression::kValue:
__ bind(&push_true);
__ push(Immediate(Factory::true_value()));
@@ -1627,34 +1634,27 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&done);
break;
- case Expression::kEffect:
- __ bind(&done);
- break;
-
case Expression::kTest:
break;
case Expression::kValueTest:
__ bind(&push_true);
__ push(Immediate(Factory::true_value()));
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
__ bind(&push_false);
__ push(Immediate(Factory::false_value()));
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
}
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), eax);
+ Apply(expr->context(), eax);
}
@@ -1706,11 +1706,6 @@ void FastCodeGenerator::ExitFinallyBlock() {
}
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
#undef __
} } // namespace v8::internal
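The bulk of the rewrite above replaces the overloaded Move(context, ...) helpers with Apply(), ApplyTOS() and DropAndApply(), all of which dispatch on the expression's Expression::Context. A toy model of that dispatch with illustrative names only; the real code also handles the kValueTest/kTestValue hybrids, and its kValue case overwrites the top stack slot rather than popping and pushing:

    #include <cstdio>
    #include <vector>

    enum Context { kEffect, kValue, kTest };

    struct Codegen {
      std::vector<int> stack;
      void Apply(Context context, int value) {
        switch (context) {
          case kEffect: break;                         // result discarded
          case kValue: stack.push_back(value); break;  // result left on stack
          case kTest: printf(value ? "true branch\n" : "false branch\n"); break;
        }
      }
      void DropAndApply(int count, Context context, int value) {
        for (int i = 0; i < count; i++) stack.pop_back();  // __ Drop(count)
        Apply(context, value);
      }
    };

    int main() {
      Codegen cg;
      cg.stack = {1, 2};  // e.g. receiver and key left behind by an IC
      cg.DropAndApply(2, kValue, 42);
      printf("top: %d, depth: %u\n", cg.stack.back(), (unsigned)cg.stack.size());
      return 0;
    }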
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index 58fe2dc99..5658605aa 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -313,6 +313,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Is the string a symbol?
__ movzx_b(ebx, FieldOperand(edx, Map::kInstanceTypeOffset));
+ ASSERT(kSymbolTag != 0);
__ test(ebx, Immediate(kIsSymbolMask));
__ j(zero, &slow, not_taken);
@@ -391,6 +392,48 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- esp[0] : return address
+ // -- esp[4] : key
+ // -- esp[8] : receiver
+ // -----------------------------------
+ Label miss, index_ok;
+
+ // Pop return address.
+ // Performing the load early is better in the common case.
+ __ pop(eax);
+
+ __ mov(ebx, Operand(esp, 1 * kPointerSize));
+ __ test(ebx, Immediate(kSmiTagMask));
+ __ j(zero, &miss);
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ test(ecx, Immediate(kIsNotStringMask));
+ __ j(not_zero, &miss);
+
+ // Check if key is a smi or a heap number.
+ __ mov(edx, Operand(esp, 0));
+ __ test(edx, Immediate(kSmiTagMask));
+ __ j(zero, &index_ok);
+ __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
+ __ cmp(ecx, Factory::heap_number_map());
+ __ j(not_equal, &miss);
+
+ __ bind(&index_ok);
+ // Duplicate receiver and key since they are expected on the stack after
+ // the KeyedLoadIC call.
+ __ push(ebx); // receiver
+ __ push(edx); // key
+ __ push(eax); // return address
+ __ InvokeBuiltin(Builtins::STRING_CHAR_AT, JUMP_FUNCTION);
+
+ __ bind(&miss);
+ __ push(eax);
+ GenerateMiss(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
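The new KeyedLoadIC::GenerateString stub only stays on the fast path when the receiver is a string and the key is a smi or heap number; any other combination jumps to the miss handler. A toy C++ model of those guards; the struct is illustrative and not V8's tagged-pointer layout:

    #include <cstdio>

    struct Object {
      bool is_smi;
      bool is_string;
      bool is_heap_number;
    };

    // Returns true when the string stub's guards all pass.
    bool StringStubApplies(const Object& receiver, const Object& key) {
      if (receiver.is_smi || !receiver.is_string) return false;  // miss
      if (key.is_smi) return true;                               // index ok
      return key.is_heap_number;                                 // else miss
    }

    int main() {
      Object str = {false, true, false};
      Object idx = {true, false, false};
      printf("fast path taken: %d\n", StringStubApplies(str, idx));
      return 0;
    }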
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 3ecbcee81..d7c7d3a23 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -325,6 +325,17 @@ void MacroAssembler::CmpInstanceType(Register map, InstanceType type) {
}
+Condition MacroAssembler::IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type) {
+ mov(map, FieldOperand(heap_object, HeapObject::kMapOffset));
+ movzx_b(instance_type, FieldOperand(map, Map::kInstanceTypeOffset));
+ ASSERT(kNotStringTag != 0);
+ test(instance_type, Immediate(kIsNotStringMask));
+ return zero;
+}
+
+
void MacroAssembler::FCmp() {
if (CpuFeatures::IsSupported(CMOV)) {
fucomip();
@@ -729,13 +740,13 @@ void MacroAssembler::AllocateInNewSpace(int object_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
@@ -759,13 +770,13 @@ void MacroAssembler::AllocateInNewSpace(int header_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
@@ -790,13 +801,13 @@ void MacroAssembler::AllocateInNewSpace(Register object_size,
cmp(result_end, Operand::StaticVariable(new_space_allocation_limit));
j(above, gc_required, not_taken);
- // Update allocation top.
- UpdateAllocationTopHelper(result_end, scratch);
-
// Tag result if requested.
if ((flags & TAG_OBJECT) != 0) {
- or_(Operand(result), Immediate(kHeapObjectTag));
+ lea(result, Operand(result, kHeapObjectTag));
}
+
+ // Update allocation top.
+ UpdateAllocationTopHelper(result_end, scratch);
}
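All three AllocateInNewSpace variants now tag the result with lea instead of or_, and only bump the allocation top afterwards. The two encodings agree because new-space allocations are at least word aligned, so the tag bits of an untagged pointer are zero; presumably lea is preferred because it leaves the flags untouched. A stand-alone check of the identity (kHeapObjectTag is 1 on ia32):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uintptr_t kHeapObjectTag = 1;
      // For any 4-byte-aligned pointer, or-ing and adding the tag coincide.
      for (uintptr_t p = 0; p < (1 << 12); p += 4) {
        assert((p | kHeapObjectTag) == (p + kHeapObjectTag));
      }
      return 0;
    }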
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index bc7c0f880..ceecebf70 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -33,10 +33,13 @@
namespace v8 {
namespace internal {
+// Convenience for platform-independent signatures. We do not normally
+// distinguish memory operands from other operands on ia32.
+typedef Operand MemOperand;
+
// Forward declaration.
class JumpTarget;
-
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
@@ -138,10 +141,28 @@ class MacroAssembler: public Assembler {
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
+ // Check if the object in register heap_object is a string. Afterwards the
+ // register map contains the object map and the register instance_type
+ // contains the instance type. map and instance_type may be the same
+ // register, in which case it holds the instance type afterwards. Either of
+ // them may also be the same register as heap_object.
+ Condition IsObjectStringType(Register heap_object,
+ Register map,
+ Register instance_type);
+
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
+ // Smi tagging support.
+ void SmiTag(Register reg) {
+ ASSERT(kSmiTag == 0);
+ shl(reg, kSmiTagSize);
+ }
+ void SmiUntag(Register reg) {
+ sar(reg, kSmiTagSize);
+ }
+
// ---------------------------------------------------------------------------
// Exception handling
@@ -347,6 +368,8 @@ class MacroAssembler: public Assembler {
void Ret();
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the esp register.
void Drop(int element_count);
void Call(Label* target) { call(target); }
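The new SmiTag/SmiUntag helpers make the ia32 smi encoding explicit: a small integer is kept shifted left by kSmiTagSize with tag bit 0 clear, and untagging uses an arithmetic shift (sar) so negative values survive the round trip. A stand-alone sketch of the arithmetic, using a positive value to sidestep C++'s rules on shifting negatives:

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;
      const int kSmiTagMask = 1;
      int value = 1234;
      int smi = value << kSmiTagSize;         // SmiTag: shl reg, kSmiTagSize
      assert((smi & kSmiTagMask) == 0);       // a clear low bit marks a smi
      assert((smi >> kSmiTagSize) == value);  // SmiUntag: sar reg, kSmiTagSize
      return 0;
    }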
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index 2e13d8aee..e41f9c3f0 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -55,13 +55,17 @@ namespace internal {
*
* Each call to a public method should retain this convention.
* The stack will have the following structure:
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - void* input_string (location of a handle containing the string)
+ * - direct_call (if 1, direct call from JavaScript code, if 0
+ * call through the runtime system)
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (location of a handle containing the string)
* --- frame alignment (if applicable) ---
* - return address
* ebp-> - old ebp
@@ -81,11 +85,13 @@ namespace internal {
* The data up to the return address must be placed there by the calling
* code, by calling the code entry as cast to a function with the signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
*/
#define __ ACCESS_MASM(masm_)
@@ -471,8 +477,6 @@ void RegExpMacroAssemblerIA32::CheckNotCharacterAfterMinusAnd(
bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
// (c - min) <= (max - min) check
@@ -481,17 +485,12 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmp(current_character(), ' ');
__ j(equal, &success);
// Check range 0x09..0x0d
- __ sub(Operand(current_character()), Immediate('\t'));
- __ cmp(current_character(), '\r' - '\t');
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
BranchOrBacktrack(above, on_no_match);
__ bind(&success);
return true;
@@ -499,72 +498,118 @@ bool RegExpMacroAssemblerIA32::CheckSpecialCharacterClass(uc16 type,
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmp(current_character(), ' ');
BranchOrBacktrack(equal, on_no_match);
- __ sub(Operand(current_character()), Immediate('\t'));
- __ cmp(current_character(), '\r' - '\t');
+ __ lea(eax, Operand(current_character(), -'\t'));
+ __ cmp(eax, '\r' - '\t');
BranchOrBacktrack(below_equal, on_no_match);
return true;
}
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ sub(Operand(current_character()), Immediate('0'));
- __ cmp(current_character(), '9' - '0');
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ sub(Operand(current_character()), Immediate('0'));
- __ cmp(current_character(), '9' - '0');
+ __ lea(eax, Operand(current_character(), -'0'));
+ __ cmp(eax, '9' - '0');
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ xor_(Operand(current_character()), Immediate(0x01));
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ sub(Operand(current_character()), Immediate(0x0b));
- __ cmp(current_character(), 0x0c - 0x0b);
+ __ sub(Operand(eax), Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ sub(Operand(current_character()), Immediate(0x2028 - 0x0b));
- __ cmp(current_character(), 1);
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 0x2029 - 0x2028);
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
+ case 'w': {
+ Label done, check_digits;
+ __ cmp(Operand(current_character()), Immediate('9'));
+ __ j(less_equal, &check_digits);
+ __ cmp(Operand(current_character()), Immediate('_'));
+ __ j(equal, &done);
+ // Convert to lower case if letter.
+ __ mov(Operand(eax), current_character());
+ __ or_(eax, 0x20);
+ // Check current character in range ['a'..'z'], nondestructively.
+ __ sub(Operand(eax), Immediate('a'));
+ __ cmp(Operand(eax), Immediate('z' - 'a'));
+ BranchOrBacktrack(above, on_no_match);
+ __ jmp(&done);
+ __ bind(&check_digits);
+ // Check current character in range ['0'..'9'].
+ __ cmp(Operand(current_character()), Immediate('0'));
+ BranchOrBacktrack(below, on_no_match);
+ __ bind(&done);
+
+ return true;
+ }
+ case 'W': {
+ Label done, check_digits;
+ __ cmp(Operand(current_character()), Immediate('9'));
+ __ j(less_equal, &check_digits);
+ __ cmp(Operand(current_character()), Immediate('_'));
+ BranchOrBacktrack(equal, on_no_match);
+ // Convert to lower case if letter.
+ __ mov(Operand(eax), current_character());
+ __ or_(eax, 0x20);
+ // Check current character in range ['a'..'z'], nondestructively.
+ __ sub(Operand(eax), Immediate('a'));
+ __ cmp(Operand(eax), Immediate('z' - 'a'));
+ BranchOrBacktrack(below_equal, on_no_match);
+ __ jmp(&done);
+ __ bind(&check_digits);
+ // Check current character in range ['0'..'9'].
+ __ cmp(Operand(current_character()), Immediate('0'));
+ BranchOrBacktrack(above_equal, on_no_match);
+ __ bind(&done);
+ return true;
+ }
+ // Non-standard classes (with no syntactic shorthand) used internally.
case '*':
// Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
+ return true;
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 or 0x2029).
+ // The opposite of '.'.
+ __ mov(Operand(eax), current_character());
+ __ xor_(Operand(eax), Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ sub(Operand(eax), Immediate(0x0b));
+ __ cmp(eax, 0x0c - 0x0b);
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ ASSERT_EQ(UC16, mode_);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ sub(Operand(eax), Immediate(0x2028 - 0x0b));
+ __ cmp(eax, 1);
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
}
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ }
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
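The new 'w' class above folds letters to lower case with "or eax, 0x20" before a single range compare, exploiting the fact that upper- and lower-case ASCII letters differ only in bit 5. A stand-alone C++ rendering of the same control flow, checked against a reference classifier; the helper name is illustrative:

    #include <cassert>
    #include <cctype>
    #include <cstdio>

    bool IsAsciiWordChar(int c) {
      if (c <= '9') return c >= '0';  // digits are checked first
      if (c == '_') return true;
      int folded = c | 0x20;          // ASCII case fold: 'A' | 0x20 == 'a'
      return folded >= 'a' && folded <= 'z';
    }

    int main() {
      for (int c = 0; c < 128; c++) {
        assert(IsAsciiWordChar(c) == (isalnum(c) || c == '_'));
      }
      puts("\\w fast path agrees with the reference on all of ASCII");
      return 0;
    }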
@@ -942,6 +987,12 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
// If not real stack overflow the stack guard was used to interrupt
// execution for another purpose.
+ // If this is a direct call from JavaScript retry the RegExp forcing the call
+ // through the runtime system. Currently the direct call cannot handle a GC.
+ if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+ return RETRY;
+ }
+
// Prepare for possible GC.
HandleScope handles;
Handle<Code> code_handle(re_code);
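With the new start-index and direct_call arguments, the entry point documented in the comment block above has, in effect, the following shape; this is a function-pointer type written for illustration, not a declaration from the V8 sources:

    typedef int (*RegExpCodeEntry)(
        void* input_string,      // String** handle location in the real code
        int start_index,         // character index where the search starts
        const char* input_start,
        const char* input_end,
        int* capture_array,      // int[num_saved_registers_] for output
        int at_start,
        void* stack_area_base,
        int direct_call);        // 1 if called directly from JS code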
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
index 5ffd46275..8e7a6a5d3 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.h
@@ -78,10 +78,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
// Checks whether the given offset from the current position is before
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
- virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
- Label* on_no_match);
+ virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
@@ -128,6 +125,7 @@ class RegExpMacroAssemblerIA32: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd + kPointerSize;
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
// Below the frame pointer - local stack variables.
// When adding local variables remember to push space for them in
// the frame in GetCode.
diff --git a/deps/v8/src/ia32/simulator-ia32.h b/deps/v8/src/ia32/simulator-ia32.h
index ce7ed0ec9..3ebd2e61b 100644
--- a/deps/v8/src/ia32/simulator-ia32.h
+++ b/deps/v8/src/ia32/simulator-ia32.h
@@ -52,9 +52,9 @@ class SimulatorStack : public v8::internal::AllStatic {
};
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index 75e478bc8..218423031 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -236,7 +236,7 @@ void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
// Load length from the string and convert to a smi.
__ bind(&load_length);
__ mov(eax, FieldOperand(receiver, String::kLengthOffset));
- __ shl(eax, kSmiTagSize);
+ __ SmiTag(eax);
__ ret(0);
// Check if the object is a JSValue wrapper.
@@ -1900,17 +1900,23 @@ Object* ConstructStubCompiler::CompileConstructStub(
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed;
- // Set the property to undefined.
- __ mov(Operand(edx, i * kPointerSize), edi);
// Check if the argument assigned to the property is actually passed.
+ // If the argument is not passed the property is set to undefined,
+ // otherwise it is taken from the stack.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ mov(ebx, edi);
__ cmp(eax, arg_number);
- __ j(below_equal, &not_passed);
- // Argument passed - find it on the stack.
- __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmov(above, ebx, Operand(ecx, arg_number * -kPointerSize));
+ } else {
+ Label not_passed;
+ __ j(below_equal, &not_passed);
+ __ mov(ebx, Operand(ecx, arg_number * -kPointerSize));
+ __ bind(&not_passed);
+ }
+ // Store value in the property.
__ mov(Operand(edx, i * kPointerSize), ebx);
- __ bind(&not_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
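The constructor-stub change replaces a forward branch with a conditional move when CMOV is available: the default value (undefined, held in edi) is written unconditionally and then conditionally overwritten with the passed argument. Branch-free selection avoids misprediction when argument counts vary from call to call. A C++ equivalent of the selection, with illustrative names:

    #include <cstdio>

    int LoadArgOrUndefined(bool passed, int arg, int undefined) {
      int result = undefined;    // mov ebx, edi
      if (passed) result = arg;  // cmov above, ebx, [ecx - arg_number*4]
      return result;
    }

    int main() {
      printf("%d %d\n", LoadArgOrUndefined(true, 42, -1),
             LoadArgOrUndefined(false, 42, -1));
      return 0;
    }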
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.cc b/deps/v8/src/ia32/virtual-frame-ia32.cc
index ba6488607..104d18750 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.cc
+++ b/deps/v8/src/ia32/virtual-frame-ia32.cc
@@ -513,13 +513,33 @@ void VirtualFrame::AllocateStackSlots() {
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- Result temp = cgen()->allocator()->Allocate();
- ASSERT(temp.is_valid());
- __ Set(temp.reg(), Immediate(undefined));
+ if (count == 1) {
+ __ push(Immediate(undefined));
+ } else if (count < kLocalVarBound) {
+ // For fewer locals the unrolled loop is more compact.
+ Result temp = cgen()->allocator()->Allocate();
+ ASSERT(temp.is_valid());
+ __ Set(temp.reg(), Immediate(undefined));
+ for (int i = 0; i < count; i++) {
+ __ push(temp.reg());
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ Result cnt = cgen()->allocator()->Allocate();
+ Result tmp = cgen()->allocator()->Allocate();
+ ASSERT(cnt.is_valid());
+ ASSERT(tmp.is_valid());
+ __ mov(cnt.reg(), Immediate(count));
+ __ mov(tmp.reg(), Immediate(undefined));
+ __ bind(&alloc_locals_loop);
+ __ push(tmp.reg());
+ __ dec(cnt.reg());
+ __ j(not_zero, &alloc_locals_loop);
+ }
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
- __ push(temp.reg());
}
}
}
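AllocateStackSlots now picks one of three code shapes by local count: a single push of the undefined immediate, an unrolled run of register pushes below kLocalVarBound, or a dec/jnz loop at or above it. A sketch of just the strategy selection; the bound is the constant added in the header change below, and the trade-off is generated code size against the loop's per-iteration overhead:

    #include <cstdio>

    const int kLocalVarBound = 10;  // as defined in virtual-frame-ia32.h

    const char* AllocationStrategy(int count) {
      if (count == 1) return "push the undefined immediate once";
      if (count < kLocalVarBound) return "unrolled pushes of a preloaded register";
      return "dec/jnz loop in generated code";
    }

    int main() {
      int counts[] = {1, 5, 30};
      for (int count : counts) {
        printf("%2d locals -> %s\n", count, AllocationStrategy(count));
      }
      return 0;
    }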
diff --git a/deps/v8/src/ia32/virtual-frame-ia32.h b/deps/v8/src/ia32/virtual-frame-ia32.h
index 6c6b4816d..d6d55d12c 100644
--- a/deps/v8/src/ia32/virtual-frame-ia32.h
+++ b/deps/v8/src/ia32/virtual-frame-ia32.h
@@ -199,6 +199,9 @@ class VirtualFrame: public ZoneObject {
// shared return site. Emits code for spills.
void PrepareForReturn();
+ // Number of local variables after which we use a loop for allocating.
+ static const int kLocalVarBound = 10;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
@@ -392,6 +395,8 @@ class VirtualFrame: public ZoneObject {
// Pushing a result invalidates it (its contents become owned by the
// frame).
void Push(Result* result) {
+ // This assert will trigger if you try to push the same value twice.
+ ASSERT(result->is_valid());
if (result->is_register()) {
Push(result->reg());
} else {
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 2661a1024..4edf6f18b 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -874,7 +874,9 @@ Object* KeyedLoadIC::Load(State state,
if (use_ic) {
Code* stub = generic_stub();
- if (object->IsJSObject()) {
+ if (object->IsString() && key->IsNumber()) {
+ stub = string_stub();
+ } else if (object->IsJSObject()) {
Handle<JSObject> receiver = Handle<JSObject>::cast(object);
if (receiver->HasExternalArrayElements()) {
stub = external_array_stub(receiver->GetElementsKind());
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index f53c6ddf0..1dd7edf61 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -280,6 +280,7 @@ class KeyedLoadIC: public IC {
static void GenerateInitialize(MacroAssembler* masm);
static void GeneratePreMonomorphic(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
+ static void GenerateString(MacroAssembler* masm);
// Generators for external array types. See objects.h.
// These are similar to the generic IC; they optimize the case of
@@ -313,6 +314,9 @@ class KeyedLoadIC: public IC {
static Code* pre_monomorphic_stub() {
return Builtins::builtin(Builtins::KeyedLoadIC_PreMonomorphic);
}
+ static Code* string_stub() {
+ return Builtins::builtin(Builtins::KeyedLoadIC_String);
+ }
static Code* external_array_stub(JSObject::ElementsKind elements_kind);
static void Clear(Address address, Code* target);
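Taken together, the ic.cc and ic.h changes add a third stub choice to KeyedLoadIC::Load: strings with numeric keys are now matched ahead of the JSObject and external-array checks. The selection order as a small stand-alone function; the flags are illustrative stand-ins for the real predicates:

    #include <cstdio>

    enum Stub { GENERIC, STRING, EXTERNAL_ARRAY };

    Stub ChooseStub(bool is_string, bool key_is_number,
                    bool is_js_object, bool has_external_elements) {
      if (is_string && key_is_number) return STRING;  // the new fast case
      if (is_js_object && has_external_elements) return EXTERNAL_ARRAY;
      return GENERIC;
    }

    int main() {
      // A keyed load like "abc"[1] now selects the string stub (1).
      printf("stub: %d\n", ChooseStub(true, true, false, false));
      return 0;
    }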
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 04d194419..8af472d39 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -112,37 +112,6 @@ static inline void ThrowRegExpException(Handle<JSRegExp> re,
// Generic RegExp methods. Dispatches to implementation specific methods.
-class OffsetsVector {
- public:
- inline OffsetsVector(int num_registers)
- : offsets_vector_length_(num_registers) {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
- vector_ = NewArray<int>(offsets_vector_length_);
- } else {
- vector_ = static_offsets_vector_;
- }
- }
- inline ~OffsetsVector() {
- if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
- DeleteArray(vector_);
- vector_ = NULL;
- }
- }
- inline int* vector() { return vector_; }
- inline int length() { return offsets_vector_length_; }
-
- private:
- int* vector_;
- int offsets_vector_length_;
- static const int kStaticOffsetsVectorSize = 50;
- static int static_offsets_vector_[kStaticOffsetsVectorSize];
-};
-
-
-int OffsetsVector::static_offsets_vector_[
- OffsetsVector::kStaticOffsetsVectorSize];
-
-
Handle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
Handle<String> pattern,
Handle<String> flag_str) {
@@ -448,6 +417,14 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
+ // Capture values are relative to start_offset only.
+ // Convert them to be relative to start of string.
+ if (captures_vector[i] >= 0) {
+ captures_vector[i] += previous_index;
+ }
+ if (captures_vector[i + 1] >= 0) {
+ captures_vector[i + 1] += previous_index;
+ }
SetCapture(*array, i, captures_vector[i]);
SetCapture(*array, i + 1, captures_vector[i + 1]);
}
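// A minimal sketch (plain C++, not the V8 API) of the shift performed in
// the loop above, assuming captures laid out as (start, end+1) pairs with
// -1 marking an unset capture:
#include <vector>
static void ShiftCaptures(std::vector<int>* captures, int previous_index) {
  for (size_t i = 0; i < captures->size(); i++) {
    // Only valid (non-negative) capture positions are rebased; -1 stays -1.
    if ((*captures)[i] >= 0) (*captures)[i] += previous_index;
  }
}
// E.g. a match run with previous_index == 4 that reports {0, 3, -1, -1}
// stores {4, 7, -1, -1}: positions relative to the whole subject string.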
@@ -1431,14 +1408,6 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
int cp_offset,
bool check_offset,
bool preloaded) {
- if (cc->is_standard() &&
- macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
- cp_offset,
- check_offset,
- on_failure)) {
- return;
- }
-
ZoneList<CharacterRange>* ranges = cc->ranges();
int max_char;
if (ascii) {
@@ -1489,6 +1458,12 @@ static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check_offset);
}
+ if (cc->is_standard() &&
+ macro_assembler->CheckSpecialCharacterClass(cc->standard_type(),
+ on_failure)) {
+ return;
+ }
+
for (int i = 0; i < last_valid_range; i++) {
CharacterRange& range = ranges->at(i);
Label next_range;
@@ -1626,8 +1601,8 @@ int TextNode::EatsAtLeast(int still_to_find, int recursion_depth) {
}
-int NegativeLookaheadChoiceNode:: EatsAtLeast(int still_to_find,
- int recursion_depth) {
+int NegativeLookaheadChoiceNode::EatsAtLeast(int still_to_find,
+ int recursion_depth) {
if (recursion_depth > RegExpCompiler::kMaxRecursion) return 0;
// Alternative 0 is the negative lookahead, alternative 1 is what comes
// afterwards.
@@ -2049,6 +2024,12 @@ static void EmitWordCheck(RegExpMacroAssembler* assembler,
Label* word,
Label* non_word,
bool fall_through_on_word) {
+ if (assembler->CheckSpecialCharacterClass(
+ fall_through_on_word ? 'w' : 'W',
+ fall_through_on_word ? non_word : word)) {
+ // Optimized implementation available.
+ return;
+ }
assembler->CheckCharacterGT('z', non_word);
assembler->CheckCharacterLT('0', non_word);
assembler->CheckCharacterGT('a' - 1, word);
@@ -2085,17 +2066,60 @@ static void EmitHat(RegExpCompiler* compiler,
assembler->LoadCurrentCharacter(new_trace.cp_offset() -1,
new_trace.backtrack(),
false);
- // Newline means \n, \r, 0x2028 or 0x2029.
- if (!compiler->ascii()) {
- assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ if (!assembler->CheckSpecialCharacterClass('n',
+ new_trace.backtrack())) {
+ // Newline means \n, \r, 0x2028 or 0x2029.
+ if (!compiler->ascii()) {
+ assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
+ }
+ assembler->CheckCharacter('\n', &ok);
+ assembler->CheckNotCharacter('\r', new_trace.backtrack());
}
- assembler->CheckCharacter('\n', &ok);
- assembler->CheckNotCharacter('\r', new_trace.backtrack());
assembler->Bind(&ok);
on_success->Emit(compiler, &new_trace);
}
+// Emit the code to handle \b and \B (word-boundary or non-word-boundary)
+// when we know whether the next character must be a word character or not.
+static void EmitHalfBoundaryCheck(AssertionNode::AssertionNodeType type,
+ RegExpCompiler* compiler,
+ RegExpNode* on_success,
+ Trace* trace) {
+ RegExpMacroAssembler* assembler = compiler->macro_assembler();
+ Label done;
+
+ Trace new_trace(*trace);
+
+ bool expect_word_character = (type == AssertionNode::AFTER_WORD_CHARACTER);
+ Label* on_word = expect_word_character ? &done : new_trace.backtrack();
+ Label* on_non_word = expect_word_character ? new_trace.backtrack() : &done;
+
+ // Check whether previous character was a word character.
+ switch (trace->at_start()) {
+ case Trace::TRUE:
+ if (expect_word_character) {
+ assembler->GoTo(on_non_word);
+ }
+ break;
+ case Trace::UNKNOWN:
+ ASSERT_EQ(0, trace->cp_offset());
+ assembler->CheckAtStart(on_non_word);
+ // Fall through.
+ case Trace::FALSE:
+ int prev_char_offset = trace->cp_offset() - 1;
+ assembler->LoadCurrentCharacter(prev_char_offset, NULL, false, 1);
+ EmitWordCheck(assembler, on_word, on_non_word, expect_word_character);
+ // We may or may not have loaded the previous character.
+ new_trace.InvalidateCurrentCharacter();
+ }
+
+ assembler->Bind(&done);
+
+ on_success->Emit(compiler, &new_trace);
+}
+
+
// Emit the code to handle \b and \B (word-boundary or non-word-boundary).
static void EmitBoundaryCheck(AssertionNode::AssertionNodeType type,
RegExpCompiler* compiler,
@@ -2205,10 +2229,15 @@ void AssertionNode::Emit(RegExpCompiler* compiler, Trace* trace) {
case AFTER_NEWLINE:
EmitHat(compiler, on_success(), trace);
return;
- case AT_NON_BOUNDARY:
case AT_BOUNDARY:
+ case AT_NON_BOUNDARY: {
EmitBoundaryCheck(type_, compiler, on_success(), trace);
return;
+ }
+ case AFTER_WORD_CHARACTER:
+ case AFTER_NONWORD_CHARACTER: {
+ EmitHalfBoundaryCheck(type_, compiler, on_success(), trace);
+ return;
+ }
}
on_success()->Emit(compiler, trace);
}
@@ -2791,7 +2820,7 @@ void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
// to generate probably can't use it.
if (i != first_normal_choice) {
alt_gen->expects_preload = false;
- new_trace.set_characters_preloaded(0);
+ new_trace.InvalidateCurrentCharacter();
}
if (i < choice_count - 1) {
new_trace.set_backtrack(&alt_gen->after);
@@ -3282,6 +3311,12 @@ void DotPrinter::VisitAssertion(AssertionNode* that) {
case AssertionNode::AFTER_NEWLINE:
stream()->Add("label=\"(?<=\\n)\", shape=septagon");
break;
+ case AssertionNode::AFTER_WORD_CHARACTER:
+ stream()->Add("label=\"(?<=\\w)\", shape=septagon");
+ break;
+ case AssertionNode::AFTER_NONWORD_CHARACTER:
+ stream()->Add("label=\"(?<=\\W)\", shape=septagon");
+ break;
}
stream()->Add("];\n");
PrintAttributes(that);
@@ -3484,6 +3519,20 @@ bool RegExpCharacterClass::is_standard() {
set_.set_standard_set_type('.');
return true;
}
+ if (CompareRanges(set_.ranges(),
+ kLineTerminatorRanges,
+ kLineTerminatorRangeCount)) {
+ set_.set_standard_set_type('n');
+ return true;
+ }
+ if (CompareRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('w');
+ return true;
+ }
+ if (CompareInverseRanges(set_.ranges(), kWordRanges, kWordRangeCount)) {
+ set_.set_standard_set_type('W');
+ return true;
+ }
return false;
}
@@ -4010,6 +4059,101 @@ void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
}
+bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
+ ASSERT_NOT_NULL(ranges);
+ int n = ranges->length();
+ if (n <= 1) return true;
+ int max = ranges->at(0).to();
+ for (int i = 1; i < n; i++) {
+ CharacterRange next_range = ranges->at(i);
+ if (next_range.from() <= max + 1) return false;
+ max = next_range.to();
+ }
+ return true;
+}
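// A standalone illustration (not V8 code) of the canonical-form predicate
// above, over plain (from, to) pairs:
#include <utility>
#include <vector>
static bool IsCanonicalList(const std::vector<std::pair<int, int> >& ranges) {
  if (ranges.size() <= 1) return true;
  int max = ranges[0].second;
  for (size_t i = 1; i < ranges.size(); i++) {
    // Each range must start at least two past the running maximum:
    // sorted, non-overlapping and non-adjacent.
    if (ranges[i].first <= max + 1) return false;
    max = ranges[i].second;
  }
  return true;
}
// {('a','c'), ('e','g')} is canonical; {('a','c'), ('d','g')} is not,
// since 'd' is adjacent to 'c' and the two ranges should be merged.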
+
+SetRelation CharacterRange::WordCharacterRelation(
+ ZoneList<CharacterRange>* range) {
+ ASSERT(IsCanonical(range));
+ int i = 0; // Word character range index.
+ int j = 0; // Argument range index.
+ ASSERT_NE(0, kWordRangeCount);
+ SetRelation result;
+ if (range->length() == 0) {
+ result.SetElementsInSecondSet();
+ return result;
+ }
+ CharacterRange argument_range = range->at(0);
+ CharacterRange word_range = CharacterRange(kWordRanges[0], kWordRanges[1]);
+ while (i < kWordRangeCount && j < range->length()) {
+ // Check the two ranges for the five cases:
+ // - no overlap.
+ // - partial overlap (there are elements in both ranges that aren't
+ // in the other, and there are also elements that are in both).
+ // - argument range entirely inside word range.
+ // - word range entirely inside argument range.
+ // - ranges are completely equal.
+
+ // First check for no overlap. The earlier range is not in the other set.
+ if (argument_range.from() > word_range.to()) {
+ // Ranges are disjoint. The earlier word range contains elements that
+ // cannot be in the argument set.
+ result.SetElementsInSecondSet();
+ } else if (word_range.from() > argument_range.to()) {
+ // Ranges are disjoint. The earlier argument range contains elements that
+ // cannot be in the word set.
+ result.SetElementsInFirstSet();
+ } else if (word_range.from() <= argument_range.from() &&
+ word_range.to() >= argument_range.to()) {
+ result.SetElementsInBothSets();
+ // Argument range completely inside word range.
+ if (word_range.from() < argument_range.from() ||
+ word_range.to() > argument_range.to()) {
+ result.SetElementsInSecondSet();
+ }
+ } else if (word_range.from() >= argument_range.from() &&
+ word_range.to() <= argument_range.to()) {
+ result.SetElementsInBothSets();
+ result.SetElementsInFirstSet();
+ } else {
+ // There is overlap, and neither is a subrange of the other.
+ result.SetElementsInFirstSet();
+ result.SetElementsInSecondSet();
+ result.SetElementsInBothSets();
+ }
+ if (result.NonTrivialIntersection()) {
+ // The result is as (im)precise as we can possibly make it.
+ return result;
+ }
+ // Progress the range(s) with minimal to-character.
+ uc16 word_to = word_range.to();
+ uc16 argument_to = argument_range.to();
+ if (argument_to <= word_to) {
+ j++;
+ if (j < range->length()) {
+ argument_range = range->at(j);
+ }
+ }
+ if (word_to <= argument_to) {
+ i += 2;
+ if (i < kWordRangeCount) {
+ word_range = CharacterRange(kWordRanges[i], kWordRanges[i + 1]);
+ }
+ }
+ }
+ // Check if anything wasn't compared in the loop.
+ if (i < kWordRangeCount) {
+ // Word range contains something not in argument range.
+ result.SetElementsInSecondSet();
+ } else if (j < range->length()) {
+ // Argument range contains something not in word range.
+ result.SetElementsInFirstSet();
+ }
+
+ return result;
+}
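// A hedged usage note: digits are a subset of the word characters
// [0-9A-Z_a-z] covered by kWordRanges, so for the canonical list
// {CharacterRange('0', '9')}:
//
//   SetRelation rel = CharacterRange::WordCharacterRelation(ranges);
//   // rel.ContainedIn() -> true: every '0'..'9' is a word character.
//   // rel.Disjoint()    -> false: the sets share elements.
//
// Analysis::VisitAssertion (further down) uses exactly these two
// predicates to specialize boundary assertions.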
+
+
static void AddUncanonicals(ZoneList<CharacterRange>* ranges,
int bottom,
int top) {
@@ -4119,6 +4263,287 @@ ZoneList<CharacterRange>* CharacterSet::ranges() {
}
+// Move a number of elements in a zonelist to another position
+// in the same list. Handles overlapping source and target areas.
+static void MoveRanges(ZoneList<CharacterRange>* list,
+ int from,
+ int to,
+ int count) {
+ // Ranges are potentially overlapping.
+ if (from < to) {
+ for (int i = count - 1; i >= 0; i--) {
+ list->at(to + i) = list->at(from + i);
+ }
+ } else {
+ for (int i = 0; i < count; i++) {
+ list->at(to + i) = list->at(from + i);
+ }
+ }
+}
+
+
+static int InsertRangeInCanonicalList(ZoneList<CharacterRange>* list,
+ int count,
+ CharacterRange insert) {
+ // Inserts a range into list[0..count[, which must be sorted
+ // by from value and non-overlapping and non-adjacent, using at most
+ // list[0..count] for the result. Returns the number of resulting
+ // canonicalized ranges. Inserting a range may collapse existing ranges into
+ // fewer ranges, so the return value can be anything in the range 1..count+1.
+ uc16 from = insert.from();
+ uc16 to = insert.to();
+ int start_pos = 0;
+ int end_pos = count;
+ for (int i = count - 1; i >= 0; i--) {
+ CharacterRange current = list->at(i);
+ if (current.from() > to + 1) {
+ end_pos = i;
+ } else if (current.to() + 1 < from) {
+ start_pos = i + 1;
+ break;
+ }
+ }
+
+ // Inserted range overlaps, or is adjacent to, ranges at positions
+ // [start_pos..end_pos[. Ranges before start_pos or at or after end_pos are
+ // not affected by the insertion.
+ // If start_pos == end_pos, the range must be inserted before start_pos.
+ // if start_pos < end_pos, the entire range from start_pos to end_pos
+ // must be merged with the insert range.
+
+ if (start_pos == end_pos) {
+ // Insert between existing ranges at position start_pos.
+ if (start_pos < count) {
+ MoveRanges(list, start_pos, start_pos + 1, count - start_pos);
+ }
+ list->at(start_pos) = insert;
+ return count + 1;
+ }
+ if (start_pos + 1 == end_pos) {
+ // Replace single existing range at position start_pos.
+ CharacterRange to_replace = list->at(start_pos);
+ int new_from = Min(to_replace.from(), from);
+ int new_to = Max(to_replace.to(), to);
+ list->at(start_pos) = CharacterRange(new_from, new_to);
+ return count;
+ }
+ // Replace a number of existing ranges from start_pos to end_pos - 1.
+ // Move the remaining ranges down.
+
+ int new_from = Min(list->at(start_pos).from(), from);
+ int new_to = Max(list->at(end_pos - 1).to(), to);
+ if (end_pos < count) {
+ MoveRanges(list, end_pos, start_pos + 1, count - end_pos);
+ }
+ list->at(start_pos) = CharacterRange(new_from, new_to);
+ return count - (end_pos - start_pos) + 1;
+}
+
+
+void CharacterSet::Canonicalize() {
+ // Special/default classes are always considered canonical. The result
+ // of calling ranges() will be sorted.
+ if (ranges_ == NULL) return;
+ CharacterRange::Canonicalize(ranges_);
+}
+
+
+void CharacterRange::Canonicalize(ZoneList<CharacterRange>* character_ranges) {
+ if (character_ranges->length() <= 1) return;
+ // Check whether ranges are already canonical (increasing, non-overlapping,
+ // non-adjacent).
+ int n = character_ranges->length();
+ int max = character_ranges->at(0).to();
+ int i = 1;
+ while (i < n) {
+ CharacterRange current = character_ranges->at(i);
+ if (current.from() <= max + 1) {
+ break;
+ }
+ max = current.to();
+ i++;
+ }
+ // Canonical until the i'th range. If that's all of them, we are done.
+ if (i == n) return;
+
+ // The ranges at index i and forward are not canonicalized. Make them so by
+ // doing the equivalent of insertion sort (inserting each into the previous
+ // list, in order).
+ // Notice that inserting a range can reduce the number of ranges in the
+ // result due to combining of adjacent and overlapping ranges.
+ int read = i; // Range to insert.
+ int num_canonical = i; // Length of canonicalized part of list.
+ do {
+ num_canonical = InsertRangeInCanonicalList(character_ranges,
+ num_canonical,
+ character_ranges->at(read));
+ read++;
+ } while (read < n);
+ character_ranges->Rewind(num_canonical);
+
+ ASSERT(CharacterRange::IsCanonical(character_ranges));
+}
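// Informal worked example of Canonicalize + InsertRangeInCanonicalList:
// starting from {[a-d], [m-p], [c-f], [e-g]}, the first two ranges are
// already canonical, so insertion starts at index 2:
//   insert [c-f]: overlaps [a-d]           -> {[a-f], [m-p]}
//   insert [e-g]: overlaps/adjacent [a-f]  -> {[a-g], [m-p]}
// Four input ranges collapse to two, and Rewind trims the stale tail.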
+
+
+// Utility function for CharacterRange::Merge. Adds a range at the end of
+// a canonicalized range list, if necessary merging the range with the last
+// range of the list.
+static void AddRangeToSet(ZoneList<CharacterRange>* set, CharacterRange range) {
+ if (set == NULL) return;
+ ASSERT(set->length() == 0 || set->at(set->length() - 1).to() < range.from());
+ int n = set->length();
+ if (n > 0) {
+ CharacterRange lastRange = set->at(n - 1);
+ if (lastRange.to() == range.from() - 1) {
+ set->at(n - 1) = CharacterRange(lastRange.from(), range.to());
+ return;
+ }
+ }
+ set->Add(range);
+}
+
+
+static void AddRangeToSelectedSet(int selector,
+ ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* intersection_set,
+ CharacterRange range) {
+ switch (selector) {
+ case kInsideFirst:
+ AddRangeToSet(first_set, range);
+ break;
+ case kInsideSecond:
+ AddRangeToSet(second_set, range);
+ break;
+ case kInsideBoth:
+ AddRangeToSet(intersection_set, range);
+ break;
+ }
+}
+
+
+
+void CharacterRange::Merge(ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* first_set_only_out,
+ ZoneList<CharacterRange>* second_set_only_out,
+ ZoneList<CharacterRange>* both_sets_out) {
+ // Inputs are canonicalized.
+ ASSERT(CharacterRange::IsCanonical(first_set));
+ ASSERT(CharacterRange::IsCanonical(second_set));
+ // Outputs are empty, if applicable.
+ ASSERT(first_set_only_out == NULL || first_set_only_out->length() == 0);
+ ASSERT(second_set_only_out == NULL || second_set_only_out->length() == 0);
+ ASSERT(both_sets_out == NULL || both_sets_out->length() == 0);
+
+ // Merge sets by iterating through the lists in order of lowest "from" value,
+ // and putting intervals into one of three sets.
+
+ if (first_set->length() == 0) {
+ second_set_only_out->AddAll(*second_set);
+ return;
+ }
+ if (second_set->length() == 0) {
+ first_set_only_out->AddAll(*first_set);
+ return;
+ }
+ // Indices into input lists.
+ int i1 = 0;
+ int i2 = 0;
+ // Cache length of input lists.
+ int n1 = first_set->length();
+ int n2 = second_set->length();
+ // Current range. May be invalid if state is kInsideNone.
+ int from = 0;
+ int to = -1;
+ // Where current range comes from.
+ int state = kInsideNone;
+
+ while (i1 < n1 || i2 < n2) {
+ CharacterRange next_range;
+ int range_source;
+ if (i2 == n2 || first_set->at(i1).from() < second_set->at(i2).from()) {
+ next_range = first_set->at(i1++);
+ range_source = kInsideFirst;
+ } else {
+ next_range = second_set->at(i2++);
+ range_source = kInsideSecond;
+ }
+ if (to < next_range.from()) {
+ // Ranges disjoint: |current| |next|
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, to));
+ from = next_range.from();
+ to = next_range.to();
+ state = range_source;
+ } else {
+ if (from < next_range.from()) {
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, next_range.from()-1));
+ }
+ if (to < next_range.to()) {
+ // Ranges overlap: |current|
+ // |next|
+ AddRangeToSelectedSet(state | range_source,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(next_range.from(), to));
+ from = to + 1;
+ to = next_range.to();
+ state = range_source;
+ } else {
+ // Range included: |current| , possibly ending at same character.
+ // |next|
+ AddRangeToSelectedSet(
+ state | range_source,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(next_range.from(), next_range.to()));
+ from = next_range.to() + 1;
+ // If ranges end at same character, both ranges are consumed completely.
+ if (next_range.to() == to) state = kInsideNone;
+ }
+ }
+ }
+ AddRangeToSelectedSet(state,
+ first_set_only_out,
+ second_set_only_out,
+ both_sets_out,
+ CharacterRange(from, to));
+}
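// A hedged reading of Merge as set algebra, with three distinct outputs:
//   first_set  = {[a-f]},  second_set = {[d-k]}
//   first_set_only_out  -> {[a-c]}   (first minus second)
//   second_set_only_out -> {[g-k]}   (second minus first)
//   both_sets_out       -> {[d-f]}   (intersection)
// Passing one list as all three outputs instead accumulates the union
// {[a-k]}, since AddRangeToSet merges the adjacent pieces as they arrive.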
+
+
+void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
+ ZoneList<CharacterRange>* negated_ranges) {
+ ASSERT(CharacterRange::IsCanonical(ranges));
+ ASSERT_EQ(0, negated_ranges->length());
+ int range_count = ranges->length();
+ uc16 from = 0;
+ int i = 0;
+ if (range_count > 0 && ranges->at(0).from() == 0) {
+ from = ranges->at(0).to();
+ i = 1;
+ }
+ while (i < range_count) {
+ CharacterRange range = ranges->at(i);
+ negated_ranges->Add(CharacterRange(from + 1, range.from() - 1));
+ from = range.to();
+ i++;
+ }
+ if (from < String::kMaxUC16CharCode) {
+ negated_ranges->Add(CharacterRange(from + 1, String::kMaxUC16CharCode));
+ }
+}
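// Worked example of Negate (informal): for the canonical input
// {[0x0000-0x0040], [0x005B-0x0060]} the first range starts at 0, so the
// loop begins after it and emits
//   {[0x0041-0x005A], [0x0061-0xFFFF]}
// i.e. 'A'-'Z' plus everything above 0x60, up to String::kMaxUC16CharCode.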
+
+
// -------------------------------------------------------------------
// Interest propagation
@@ -4410,9 +4835,203 @@ void Analysis::VisitBackReference(BackReferenceNode* that) {
void Analysis::VisitAssertion(AssertionNode* that) {
EnsureAnalyzed(that->on_success());
+ AssertionNode::AssertionNodeType type = that->type();
+ if (type == AssertionNode::AT_BOUNDARY ||
+ type == AssertionNode::AT_NON_BOUNDARY) {
+ // Check if the following character is known to be a word character
+ // or known to not be a word character.
+ ZoneList<CharacterRange>* following_chars = that->FirstCharacterSet();
+
+ CharacterRange::Canonicalize(following_chars);
+
+ SetRelation word_relation =
+ CharacterRange::WordCharacterRelation(following_chars);
+ if (word_relation.ContainedIn()) {
+ // Following character is definitely a word character.
+ type = (type == AssertionNode::AT_BOUNDARY) ?
+ AssertionNode::AFTER_NONWORD_CHARACTER :
+ AssertionNode::AFTER_WORD_CHARACTER;
+ that->set_type(type);
+ } else if (word_relation.Disjoint()) {
+ // Following character is definitely *not* a word character.
+ type = (type == AssertionNode::AT_BOUNDARY) ?
+ AssertionNode::AFTER_WORD_CHARACTER :
+ AssertionNode::AFTER_NONWORD_CHARACTER;
+ that->set_type(type);
+ }
+ }
+}
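// Concrete effect of this specialization (illustrative): in /\bfoo/ the
// character following the boundary is always 'f', a word character, so
// word_relation.ContainedIn() holds and the AT_BOUNDARY node is rewritten
// to AFTER_NONWORD_CHARACTER. EmitHalfBoundaryCheck then only verifies
// that the preceding character is a non-word character, instead of the
// full two-sided test in EmitBoundaryCheck.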
+
+
+ZoneList<CharacterRange>* RegExpNode::FirstCharacterSet() {
+ if (first_character_set_ == NULL) {
+ if (ComputeFirstCharacterSet(kFirstCharBudget) < 0) {
+ // If we can't find an exact solution within the budget, we
+ // set the value to the set of every character, i.e., all characters
+ // are possible.
+ ZoneList<CharacterRange>* all_set = new ZoneList<CharacterRange>(1);
+ all_set->Add(CharacterRange::Everything());
+ first_character_set_ = all_set;
+ }
+ }
+ return first_character_set_;
+}
+
+
+int RegExpNode::ComputeFirstCharacterSet(int budget) {
+ // Default behavior is to not be able to determine the first character.
+ return kComputeFirstCharacterSetFail;
}
+int LoopChoiceNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ // Find loop min-iteration. It's the value of the guarded choice node
+ // with a GEQ guard, if any.
+ int min_repetition = 0;
+
+ for (int i = 0; i <= 1; i++) {
+ GuardedAlternative alternative = alternatives()->at(i);
+ ZoneList<Guard*>* guards = alternative.guards();
+ if (guards != NULL && guards->length() > 0) {
+ Guard* guard = guards->at(0);
+ if (guard->op() == Guard::GEQ) {
+ min_repetition = guard->value();
+ break;
+ }
+ }
+ }
+
+ budget = loop_node()->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ ZoneList<CharacterRange>* character_set =
+ loop_node()->first_character_set();
+ if (body_can_be_zero_length() || min_repetition == 0) {
+ budget = continue_node()->ComputeFirstCharacterSet(budget);
+ if (budget < 0) return budget;
+ ZoneList<CharacterRange>* body_set =
+ continue_node()->first_character_set();
+ ZoneList<CharacterRange>* union_set =
+ new ZoneList<CharacterRange>(Max(character_set->length(),
+ body_set->length()));
+ CharacterRange::Merge(character_set,
+ body_set,
+ union_set,
+ union_set,
+ union_set);
+ character_set = union_set;
+ }
+ set_first_character_set(character_set);
+ }
+ }
+ return budget;
+}
+
+
+int NegativeLookaheadChoiceNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ GuardedAlternative successor = this->alternatives()->at(1);
+ RegExpNode* successor_node = successor.node();
+ budget = successor_node->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ set_first_character_set(successor_node->first_character_set());
+ }
+ }
+ return budget;
+}
+
+
+// The first character set of an EndNode is unknowable. Just use the
+// default implementation, which fails so that all characters are treated
+// as possible.
+
+
+int AssertionNode::ComputeFirstCharacterSet(int budget) {
+ budget -= 1;
+ if (budget >= 0) {
+ switch (type_) {
+ case AT_END: {
+ set_first_character_set(new ZoneList<CharacterRange>(0));
+ break;
+ }
+ case AT_START:
+ case AT_BOUNDARY:
+ case AT_NON_BOUNDARY:
+ case AFTER_NEWLINE:
+ case AFTER_NONWORD_CHARACTER:
+ case AFTER_WORD_CHARACTER: {
+ ASSERT_NOT_NULL(on_success());
+ budget = on_success()->ComputeFirstCharacterSet(budget);
+ set_first_character_set(on_success()->first_character_set());
+ break;
+ }
+ }
+ }
+ return budget;
+}
+
+
+int ActionNode::ComputeFirstCharacterSet(int budget) {
+ if (type_ == POSITIVE_SUBMATCH_SUCCESS) return kComputeFirstCharacterSetFail;
+ budget--;
+ if (budget >= 0) {
+ ASSERT_NOT_NULL(on_success());
+ budget = on_success()->ComputeFirstCharacterSet(budget);
+ if (budget >= 0) {
+ set_first_character_set(on_success()->first_character_set());
+ }
+ }
+ return budget;
+}
+
+
+int BackReferenceNode::ComputeFirstCharacterSet(int budget) {
+ // We don't know anything about the first character of a backreference
+ // at this point.
+ return kComputeFirstCharacterSetFail;
+}
+
+
+int TextNode::ComputeFirstCharacterSet(int budget) {
+ budget--;
+ if (budget >= 0) {
+ ASSERT_NE(0, elements()->length());
+ TextElement text = elements()->at(0);
+ if (text.type == TextElement::ATOM) {
+ RegExpAtom* atom = text.data.u_atom;
+ ASSERT_NE(0, atom->length());
+ uc16 first_char = atom->data()[0];
+ ZoneList<CharacterRange>* range = new ZoneList<CharacterRange>(1);
+ range->Add(CharacterRange(first_char, first_char));
+ set_first_character_set(range);
+ } else {
+ ASSERT(text.type == TextElement::CHAR_CLASS);
+ RegExpCharacterClass* char_class = text.data.u_char_class;
+ if (char_class->is_negated()) {
+ ZoneList<CharacterRange>* ranges = char_class->ranges();
+ int length = ranges->length();
+ int new_length = length + 1;
+ if (length > 0) {
+ if (ranges->at(0).from() == 0) new_length--;
+ if (ranges->at(length - 1).to() == String::kMaxUC16CharCode) {
+ new_length--;
+ }
+ }
+ ZoneList<CharacterRange>* negated_ranges =
+ new ZoneList<CharacterRange>(new_length);
+ CharacterRange::Negate(ranges, negated_ranges);
+ set_first_character_set(negated_ranges);
+ } else {
+ set_first_character_set(char_class->ranges());
+ }
+ }
+ }
+ return budget;
+}
+
+
+
// -------------------------------------------------------------------
// Dispatch table construction
@@ -4471,7 +5090,6 @@ void DispatchTableConstructor::VisitAssertion(AssertionNode* that) {
}
-
static int CompareRangeByFrom(const CharacterRange* a,
const CharacterRange* b) {
return Compare<uc16>(a->from(), b->from());
@@ -4606,4 +5224,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(RegExpCompileData* data,
pattern);
}
+
+int OffsetsVector::static_offsets_vector_[
+ OffsetsVector::kStaticOffsetsVectorSize];
+
}} // namespace v8::internal
diff --git a/deps/v8/src/jsregexp.h b/deps/v8/src/jsregexp.h
index b6811194c..b99a89e1b 100644
--- a/deps/v8/src/jsregexp.h
+++ b/deps/v8/src/jsregexp.h
@@ -75,13 +75,6 @@ class RegExpImpl {
int index,
Handle<JSArray> lastMatchInfo);
- // Call RegExp.prototyp.exec(string) in a loop.
- // Used by String.prototype.match and String.prototype.replace.
- // This function calls the garbage collector if necessary.
- static Handle<Object> ExecGlobal(Handle<JSRegExp> regexp,
- Handle<String> subject,
- Handle<JSArray> lastMatchInfo);
-
// Prepares a JSRegExp object with Irregexp-specific data.
static void IrregexpPrepare(Handle<JSRegExp> re,
Handle<String> pattern,
@@ -108,13 +101,23 @@ class RegExpImpl {
int index,
Handle<JSArray> lastMatchInfo);
- // Offsets in the lastMatchInfo array.
+ // Array indices in the lastMatchInfo array.
static const int kLastCaptureCount = 0;
static const int kLastSubject = 1;
static const int kLastInput = 2;
static const int kFirstCapture = 3;
static const int kLastMatchOverhead = 3;
+ // Direct byte offsets into the lastMatchInfo array.
+ static const int kLastCaptureCountOffset =
+ FixedArray::kHeaderSize + kLastCaptureCount * kPointerSize;
+ static const int kLastSubjectOffset =
+ FixedArray::kHeaderSize + kLastSubject * kPointerSize;
+ static const int kLastInputOffset =
+ FixedArray::kHeaderSize + kLastInput * kPointerSize;
+ static const int kFirstCaptureOffset =
+ FixedArray::kHeaderSize + kFirstCapture * kPointerSize;
+
// Used to access the lastMatchInfo array.
static int GetCapture(FixedArray* array, int index) {
return Smi::cast(array->get(index + kFirstCapture))->value();
@@ -174,6 +177,57 @@ class RegExpImpl {
};
+// Represents the location of one element relative to the intersection of
+// two sets. Corresponds to the four areas of a Venn diagram.
+enum ElementInSetsRelation {
+ kInsideNone = 0,
+ kInsideFirst = 1,
+ kInsideSecond = 2,
+ kInsideBoth = 3
+};
+
+
+// Represents the relation of two sets.
+// Sets can be either disjoint, partially or fully overlapping, or equal.
+class SetRelation BASE_EMBEDDED {
+ public:
+ // Relation is represented by a bit saying whether there are elements in
+ // one set that are not in the other, and a bit saying that there are elements
+ // that are in both sets.
+
+ // Location of an element. Corresponds to the internal areas of
+ // a Venn diagram.
+ enum {
+ kInFirst = 1 << kInsideFirst,
+ kInSecond = 1 << kInsideSecond,
+ kInBoth = 1 << kInsideBoth
+ };
+ SetRelation() : bits_(0) {}
+ ~SetRelation() {}
+ // Record the existence of elements in a particular area of the Venn diagram.
+ void SetElementsInFirstSet() { bits_ |= kInFirst; }
+ void SetElementsInSecondSet() { bits_ |= kInSecond; }
+ void SetElementsInBothSets() { bits_ |= kInBoth; }
+ // Check the currently known relation of the sets (common queries only;
+ // for other combinations, use value() to get the bits and check them
+ // manually).
+ // Sets are completely disjoint.
+ bool Disjoint() { return (bits_ & kInBoth) == 0; }
+ // Sets are equal.
+ bool Equals() { return (bits_ & (kInFirst | kInSecond)) == 0; }
+ // First set contains second.
+ bool Contains() { return (bits_ & kInSecond) == 0; }
+ // Second set contains first.
+ bool ContainedIn() { return (bits_ & kInFirst) == 0; }
+ bool NonTrivialIntersection() {
+ return (bits_ == (kInFirst | kInSecond | kInBoth));
+ }
+ int value() { return bits_; }
+ private:
+ int bits_;
+};
+
+
class CharacterRange {
public:
CharacterRange() : from_(0), to_(0) { }
@@ -205,7 +259,39 @@ class CharacterRange {
Vector<const uc16> overlay,
ZoneList<CharacterRange>** included,
ZoneList<CharacterRange>** excluded);
-
+ // Whether a range list is in canonical form: Ranges ordered by from value,
+ // and ranges non-overlapping and non-adjacent.
+ static bool IsCanonical(ZoneList<CharacterRange>* ranges);
+ // Convert range list to canonical form. The characters covered by the ranges
+ // will still be the same, but no character is in more than one range, and
+ // adjacent ranges are merged. The resulting list may be shorter than the
+ // original, but cannot be longer.
+ static void Canonicalize(ZoneList<CharacterRange>* ranges);
+ // Check how the set of characters defined by a CharacterRange list relates
+ // to the set of word characters. List must be in canonical form.
+ static SetRelation WordCharacterRelation(ZoneList<CharacterRange>* ranges);
+ // Takes two character range lists (representing character sets) in canonical
+ // form and merges them.
+ // The characters that are only covered by the first set are added to
+ // first_set_only_out. The characters that are only in the second set are
+ // added to second_set_only_out, and the characters that are in both are
+ // added to both_sets_out.
+ // The pointers to first_set_only_out, second_set_only_out and both_sets_out
+ // should be to empty lists, but they need not be distinct, and may be NULL.
+ // If NULL, the characters are dropped, and if two arguments are the same
+ // pointer, the result is the union of the two sets that would be created
+ // if the pointers had been distinct.
+ // This way, the Merge function can compute all the usual set operations:
+ // union (all three out-sets are equal), intersection (only both_sets_out is
+ // non-NULL), and set difference (only first_set_only_out is non-NULL).
+ static void Merge(ZoneList<CharacterRange>* first_set,
+ ZoneList<CharacterRange>* second_set,
+ ZoneList<CharacterRange>* first_set_only_out,
+ ZoneList<CharacterRange>* second_set_only_out,
+ ZoneList<CharacterRange>* both_sets_out);
+ // Negate the contents of a character range in canonical form.
+ static void Negate(ZoneList<CharacterRange>* src,
+ ZoneList<CharacterRange>* dst);
static const int kRangeCanonicalizeMax = 0x346;
static const int kStartMarker = (1 << 24);
static const int kPayloadMask = (1 << 24) - 1;
@@ -479,7 +565,7 @@ class QuickCheckDetails {
class RegExpNode: public ZoneObject {
public:
- RegExpNode() : trace_count_(0) { }
+ RegExpNode() : first_character_set_(NULL), trace_count_(0) { }
virtual ~RegExpNode();
virtual void Accept(NodeVisitor* visitor) = 0;
// Generates a goto to this node or actually generates the code at this point.
@@ -530,8 +616,29 @@ class RegExpNode: public ZoneObject {
SiblingList* siblings() { return &siblings_; }
void set_siblings(SiblingList* other) { siblings_ = *other; }
+ // Return the set of possible next characters recognized by the regexp
+ // (or a safe subset, potentially the set of all characters).
+ ZoneList<CharacterRange>* FirstCharacterSet();
+
+ // Compute (if possible within the budget of traversed nodes) the
+ // possible first characters of the input matched by this node and
+ // its continuation. Returns the remaining budget after the computation.
+ // If the budget is spent, the result is negative, and the cached
+ // first_character_set_ value isn't set.
+ virtual int ComputeFirstCharacterSet(int budget);
+
+ // Get and set the cached first character set value.
+ ZoneList<CharacterRange>* first_character_set() {
+ return first_character_set_;
+ }
+ void set_first_character_set(ZoneList<CharacterRange>* character_set) {
+ first_character_set_ = character_set;
+ }
+
protected:
enum LimitResult { DONE, CONTINUE };
+ static const int kComputeFirstCharacterSetFail = -1;
+
LimitResult LimitVersions(RegExpCompiler* compiler, Trace* trace);
// Returns a sibling of this node whose interests and assumptions
@@ -552,9 +659,11 @@ class RegExpNode: public ZoneObject {
virtual RegExpNode* Clone() = 0;
private:
+ static const int kFirstCharBudget = 10;
Label label_;
NodeInfo info_;
SiblingList siblings_;
+ ZoneList<CharacterRange>* first_character_set_;
// This variable keeps track of how many times code has been generated for
// this node (in different traces). We don't keep track of where the
// generated code is located unless the code is generated at the start of
@@ -645,7 +754,7 @@ class ActionNode: public SeqRegExpNode {
// TODO(erikcorry): We should allow some action nodes in greedy loops.
virtual int GreedyLoopTextLength() { return kNodeIsTooComplexForGreedyLoops; }
virtual ActionNode* Clone() { return new ActionNode(*this); }
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
union {
struct {
@@ -711,7 +820,7 @@ class TextNode: public SeqRegExpNode {
return result;
}
void CalculateOffsets();
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
enum TextEmitPassType {
NON_ASCII_MATCH, // Check for characters that can't match.
@@ -741,7 +850,12 @@ class AssertionNode: public SeqRegExpNode {
AT_START,
AT_BOUNDARY,
AT_NON_BOUNDARY,
- AFTER_NEWLINE
+ AFTER_NEWLINE,
+ // Types not directly expressible in regexp syntax.
+ // Used for modifying a boundary node if its following character is
+ // known to be a word character or known to be a non-word character.
+ AFTER_NONWORD_CHARACTER,
+ AFTER_WORD_CHARACTER
};
static AssertionNode* AtEnd(RegExpNode* on_success) {
return new AssertionNode(AT_END, on_success);
@@ -765,8 +879,10 @@ class AssertionNode: public SeqRegExpNode {
RegExpCompiler* compiler,
int filled_in,
bool not_at_start);
+ virtual int ComputeFirstCharacterSet(int budget);
virtual AssertionNode* Clone() { return new AssertionNode(*this); }
AssertionNodeType type() { return type_; }
+ void set_type(AssertionNodeType type) { type_ = type; }
private:
AssertionNode(AssertionNodeType t, RegExpNode* on_success)
: SeqRegExpNode(on_success), type_(t) { }
@@ -794,7 +910,7 @@ class BackReferenceNode: public SeqRegExpNode {
return;
}
virtual BackReferenceNode* Clone() { return new BackReferenceNode(*this); }
-
+ virtual int ComputeFirstCharacterSet(int budget);
private:
int start_reg_;
int end_reg_;
@@ -816,7 +932,6 @@ class EndNode: public RegExpNode {
UNREACHABLE();
}
virtual EndNode* Clone() { return new EndNode(*this); }
-
private:
Action action_;
};
@@ -950,6 +1065,7 @@ class NegativeLookaheadChoiceNode: public ChoiceNode {
// characters, but on a negative lookahead the negative branch did not take
// part in that calculation (EatsAtLeast) so the assumptions don't hold.
virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
+ virtual int ComputeFirstCharacterSet(int budget);
};
@@ -968,6 +1084,7 @@ class LoopChoiceNode: public ChoiceNode {
RegExpCompiler* compiler,
int characters_filled_in,
bool not_at_start);
+ virtual int ComputeFirstCharacterSet(int budget);
virtual LoopChoiceNode* Clone() { return new LoopChoiceNode(*this); }
RegExpNode* loop_node() { return loop_node_; }
RegExpNode* continue_node() { return continue_node_; }
@@ -1123,7 +1240,7 @@ class Trace {
void set_backtrack(Label* backtrack) { backtrack_ = backtrack; }
void set_stop_node(RegExpNode* node) { stop_node_ = node; }
void set_loop_label(Label* label) { loop_label_ = label; }
- void set_characters_preloaded(int cpre) { characters_preloaded_ = cpre; }
+ void set_characters_preloaded(int count) { characters_preloaded_ = count; }
void set_bound_checked_up_to(int to) { bound_checked_up_to_ = to; }
void set_flush_budget(int to) { flush_budget_ = to; }
void set_quick_check_performed(QuickCheckDetails* d) {
@@ -1283,6 +1400,40 @@ class RegExpEngine: public AllStatic {
};
+class OffsetsVector {
+ public:
+ inline OffsetsVector(int num_registers)
+ : offsets_vector_length_(num_registers) {
+ if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ vector_ = NewArray<int>(offsets_vector_length_);
+ } else {
+ vector_ = static_offsets_vector_;
+ }
+ }
+ inline ~OffsetsVector() {
+ if (offsets_vector_length_ > kStaticOffsetsVectorSize) {
+ DeleteArray(vector_);
+ vector_ = NULL;
+ }
+ }
+ inline int* vector() { return vector_; }
+ inline int length() { return offsets_vector_length_; }
+
+ static const int kStaticOffsetsVectorSize = 50;
+
+ private:
+ static Address static_offsets_vector_address() {
+ return reinterpret_cast<Address>(&static_offsets_vector_);
+ }
+
+ int* vector_;
+ int offsets_vector_length_;
+ static int static_offsets_vector_[kStaticOffsetsVectorSize];
+
+ friend class ExternalReference;
+};
+
+
} } // namespace v8::internal
#endif // V8_JSREGEXP_H_
diff --git a/deps/v8/src/jump-target.h b/deps/v8/src/jump-target.h
index 0933ee781..dd291c6b3 100644
--- a/deps/v8/src/jump-target.h
+++ b/deps/v8/src/jump-target.h
@@ -112,7 +112,8 @@ class JumpTarget : public ZoneObject { // Shadows are dynamically allocated.
// Emit a conditional branch to the target. There must be a current
// frame at the branch. The current frame will fall through to the
- // code after the branch.
+ // code after the branch. The arg is a result that is live both at
+ // the target and the fall-through.
virtual void Branch(Condition cc, Hint hint = no_hint);
virtual void Branch(Condition cc, Result* arg, Hint hint = no_hint);
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index 5b06099a8..1e436a0a1 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -92,12 +92,13 @@ macro IS_ERROR(arg) = (%_ClassOf(arg) === 'Error');
macro IS_SCRIPT(arg) = (%_ClassOf(arg) === 'Script');
macro IS_ARGUMENTS(arg) = (%_ClassOf(arg) === 'Arguments');
macro IS_GLOBAL(arg) = (%_ClassOf(arg) === 'global');
-macro FLOOR(arg) = %Math_floor(arg);
+macro FLOOR(arg) = $floor(arg);
# Inline macros. Use %IS_VAR to make sure arg is evaluated only once.
macro NUMBER_IS_NAN(arg) = (!%_IsSmi(%IS_VAR(arg)) && !(arg == arg));
macro TO_INTEGER(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInteger(arg));
-macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : ToInt32(arg));
+macro TO_INT32(arg) = (%_IsSmi(%IS_VAR(arg)) ? arg : (arg >> 0));
+macro TO_UINT32(arg) = (arg >>> 0);
# Macros implemented in Python.
python macro CHAR_CODE(str) = ord(str[1]);
@@ -117,6 +118,14 @@ macro NUMBER_OF_CAPTURES(array) = ((array)[0]);
# Gets the value of a Date object. If arg is not a Date object
# a type error is thrown.
macro DATE_VALUE(arg) = (%_ClassOf(arg) === 'Date' ? %_ValueOf(arg) : ThrowDateTypeError());
+macro DAY(time) = ($floor(time / 86400000));
+macro MONTH_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).month);
+macro DATE_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).date);
+macro YEAR_FROM_TIME(time) = (FromJulianDay(($floor(time / 86400000)) + 2440588).year);
+macro HOUR_FROM_TIME(time) = (Modulo($floor(time / 3600000), 24));
+macro MIN_FROM_TIME(time) = (Modulo($floor(time / 60000), 60));
+macro SEC_FROM_TIME(time) = (Modulo($floor(time / 1000), 60));
+macro MS_FROM_TIME(time) = (Modulo(time, 1000));
# Last input and last subject of regexp matches.
macro LAST_SUBJECT(array) = ((array)[1]);
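// (C++ sketch, not part of macros.py.) The date macros above divide out
// 86400000 ms per day and add 2440588, which is the Julian Day Number of
// the Unix epoch, 1970-01-01:
#include <cmath>
static int TimeToJulianDayNumber(double time_ms) {
  return static_cast<int>(std::floor(time_ms / 86400000.0)) + 2440588;
}
// TimeToJulianDayNumber(0) == 2440588; FromJulianDay (assumed to live in
// the runtime's date code) then splits that day number into year, month
// and date fields.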
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 7a5353745..e284b4264 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -116,6 +116,8 @@ void MarkCompactCollector::Prepare(GCTracer* tracer) {
compact_on_next_gc_ = false;
if (FLAG_never_compact) compacting_collection_ = false;
+ if (!Heap::map_space()->MapPointersEncodable())
+ compacting_collection_ = false;
if (FLAG_collect_maps) CreateBackPointers();
#ifdef DEBUG
@@ -789,7 +791,7 @@ void MarkCompactCollector::ClearNonLiveTransitions() {
// back pointers, reversing them all at once. This allows us to find
// those maps with map transitions that need to be nulled, and only
// scan the descriptor arrays of those maps, not all maps.
- // All of these actions are carried out only on maps of JSObects
+ // All of these actions are carried out only on maps of JSObjects
// and related subtypes.
while (map_iterator.has_next()) {
Map* map = reinterpret_cast<Map*>(map_iterator.next());
@@ -1166,7 +1168,7 @@ void MarkCompactCollector::DeallocateCodeBlock(Address start,
void MarkCompactCollector::DeallocateMapBlock(Address start,
int size_in_bytes) {
- // Objects in map space are frequently assumed to have size Map::kSize and a
+ // Objects in map space are assumed to have size Map::kSize and a
// valid map in their first word. Thus, we break the free block up into
// chunks and free them separately.
ASSERT(size_in_bytes % Map::kSize == 0);
@@ -1240,6 +1242,225 @@ void MarkCompactCollector::EncodeForwardingAddresses() {
}
+class MapIterator : public HeapObjectIterator {
+ public:
+ MapIterator() : HeapObjectIterator(Heap::map_space(), &SizeCallback) { }
+
+ explicit MapIterator(Address start)
+ : HeapObjectIterator(Heap::map_space(), start, &SizeCallback) { }
+
+ private:
+ static int SizeCallback(HeapObject* unused) {
+ USE(unused);
+ return Map::kSize;
+ }
+};
+
+
+class MapCompact {
+ public:
+ explicit MapCompact(int live_maps)
+ : live_maps_(live_maps),
+ to_evacuate_start_(Heap::map_space()->TopAfterCompaction(live_maps)),
+ map_to_evacuate_it_(to_evacuate_start_),
+ first_map_to_evacuate_(
+ reinterpret_cast<Map*>(HeapObject::FromAddress(to_evacuate_start_))) {
+ }
+
+ void CompactMaps() {
+ // As we know the number of maps to evacuate beforehand,
+ // we stop when there are no more vacant maps.
+ for (Map* next_vacant_map = NextVacantMap();
+ next_vacant_map;
+ next_vacant_map = NextVacantMap()) {
+ EvacuateMap(next_vacant_map, NextMapToEvacuate());
+ }
+
+#ifdef DEBUG
+ CheckNoMapsToEvacuate();
+#endif
+ }
+
+ void UpdateMapPointersInRoots() {
+ Heap::IterateRoots(&map_updating_visitor_, VISIT_ONLY_STRONG);
+ GlobalHandles::IterateWeakRoots(&map_updating_visitor_);
+ }
+
+ void FinishMapSpace() {
+ // Iterate through the compacted (destination) area of map space and
+ // finish the move.
+ MapIterator it;
+ HeapObject* o = it.next();
+ for (; o != first_map_to_evacuate_; o = it.next()) {
+ Map* map = reinterpret_cast<Map*>(o);
+ ASSERT(!map->IsMarked());
+ ASSERT(!map->IsOverflowed());
+ ASSERT(map->IsMap());
+ Heap::UpdateRSet(map);
+ }
+ }
+
+ void UpdateMapPointersInPagedSpace(PagedSpace* space) {
+ ASSERT(space != Heap::map_space());
+
+ PageIterator it(space, PageIterator::PAGES_IN_USE);
+ while (it.has_next()) {
+ Page* p = it.next();
+ UpdateMapPointersInRange(p->ObjectAreaStart(), p->AllocationTop());
+ }
+ }
+
+ void UpdateMapPointersInNewSpace() {
+ NewSpace* space = Heap::new_space();
+ UpdateMapPointersInRange(space->bottom(), space->top());
+ }
+
+ void UpdateMapPointersInLargeObjectSpace() {
+ LargeObjectIterator it(Heap::lo_space());
+ while (true) {
+ if (!it.has_next()) break;
+ UpdateMapPointersInObject(it.next());
+ }
+ }
+
+ void Finish() {
+ Heap::map_space()->FinishCompaction(to_evacuate_start_, live_maps_);
+ }
+
+ private:
+ int live_maps_;
+ Address to_evacuate_start_;
+ MapIterator vacant_map_it_;
+ MapIterator map_to_evacuate_it_;
+ Map* first_map_to_evacuate_;
+
+ // Helper class for updating map pointers in HeapObjects.
+ class MapUpdatingVisitor: public ObjectVisitor {
+ public:
+ void VisitPointer(Object** p) {
+ UpdateMapPointer(p);
+ }
+
+ void VisitPointers(Object** start, Object** end) {
+ for (Object** p = start; p < end; p++) UpdateMapPointer(p);
+ }
+
+ private:
+ void UpdateMapPointer(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+ HeapObject* old_map = reinterpret_cast<HeapObject*>(*p);
+
+ // Moved maps are tagged with overflowed map word. They are the only
+ // objects whose map word is overflowed, as marking is already complete.
+ MapWord map_word = old_map->map_word();
+ if (!map_word.IsOverflowed()) return;
+
+ *p = GetForwardedMap(map_word);
+ }
+ };
+
+ static MapUpdatingVisitor map_updating_visitor_;
+
+ static Map* NextMap(MapIterator* it, HeapObject* last, bool live) {
+ while (true) {
+ ASSERT(it->has_next());
+ HeapObject* next = it->next();
+ if (next == last)
+ return NULL;
+ ASSERT(!next->IsOverflowed());
+ ASSERT(!next->IsMarked());
+ ASSERT(next->IsMap() || FreeListNode::IsFreeListNode(next));
+ if (next->IsMap() == live)
+ return reinterpret_cast<Map*>(next);
+ }
+ }
+
+ Map* NextVacantMap() {
+ Map* map = NextMap(&vacant_map_it_, first_map_to_evacuate_, false);
+ ASSERT(map == NULL || FreeListNode::IsFreeListNode(map));
+ return map;
+ }
+
+ Map* NextMapToEvacuate() {
+ Map* map = NextMap(&map_to_evacuate_it_, NULL, true);
+ ASSERT(map != NULL);
+ ASSERT(map->IsMap());
+ return map;
+ }
+
+ static void EvacuateMap(Map* vacant_map, Map* map_to_evacuate) {
+ ASSERT(FreeListNode::IsFreeListNode(vacant_map));
+ ASSERT(map_to_evacuate->IsMap());
+
+ memcpy(
+ reinterpret_cast<void*>(vacant_map->address()),
+ reinterpret_cast<void*>(map_to_evacuate->address()),
+ Map::kSize);
+ ASSERT(vacant_map->IsMap()); // Due to memcpy above.
+
+ MapWord forwarding_map_word = MapWord::FromMap(vacant_map);
+ forwarding_map_word.SetOverflow();
+ map_to_evacuate->set_map_word(forwarding_map_word);
+
+ ASSERT(map_to_evacuate->map_word().IsOverflowed());
+ ASSERT(GetForwardedMap(map_to_evacuate->map_word()) == vacant_map);
+ }
+
+ static Map* GetForwardedMap(MapWord map_word) {
+ ASSERT(map_word.IsOverflowed());
+ map_word.ClearOverflow();
+ Map* new_map = map_word.ToMap();
+ ASSERT_MAP_ALIGNED(new_map->address());
+ return new_map;
+ }
+
+ static int UpdateMapPointersInObject(HeapObject* obj) {
+ ASSERT(!obj->IsMarked());
+ Map* map = obj->map();
+ ASSERT(Heap::map_space()->Contains(map));
+ MapWord map_word = map->map_word();
+ ASSERT(!map_word.IsMarked());
+ if (map_word.IsOverflowed()) {
+ Map* new_map = GetForwardedMap(map_word);
+ ASSERT(Heap::map_space()->Contains(new_map));
+ obj->set_map(new_map);
+
+#ifdef DEBUG
+ if (FLAG_gc_verbose) {
+ PrintF("update %p : %p -> %p\n", obj->address(),
+ map, new_map);
+ }
+#endif
+ }
+
+ int size = obj->SizeFromMap(map);
+ obj->IterateBody(map->instance_type(), size, &map_updating_visitor_);
+ return size;
+ }
+
+ static void UpdateMapPointersInRange(Address start, Address end) {
+ HeapObject* object;
+ int size;
+ for (Address current = start; current < end; current += size) {
+ object = HeapObject::FromAddress(current);
+ size = UpdateMapPointersInObject(object);
+ ASSERT(size > 0);
+ }
+ }
+
+#ifdef DEBUG
+ void CheckNoMapsToEvacuate() {
+ if (!FLAG_enable_slow_asserts)
+ return;
+
+ while (map_to_evacuate_it_.has_next())
+ ASSERT(FreeListNode::IsFreeListNode(map_to_evacuate_it_.next()));
+ }
+#endif
+};
+
+MapCompact::MapUpdatingVisitor MapCompact::map_updating_visitor_;
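// Shape of the algorithm above, informally: live maps are evacuated from
// the tail of map space into vacant free-list slots near the front until
// the space is dense. Each evacuated map leaves a forwarding pointer
// behind, encoded as the new address with the overflow bit set in the map
// word; marking is already complete, so an overflowed map word can only
// mean "moved". MapUpdatingVisitor then rewrites every heap pointer by
// following exactly those overflowed map words.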
+
+
void MarkCompactCollector::SweepSpaces() {
ASSERT(state_ == SWEEP_SPACES);
ASSERT(!IsCompacting());
@@ -1254,6 +1475,26 @@ void MarkCompactCollector::SweepSpaces() {
SweepSpace(Heap::cell_space(), &DeallocateCellBlock);
SweepSpace(Heap::new_space());
SweepSpace(Heap::map_space(), &DeallocateMapBlock);
+ int live_maps = Heap::map_space()->Size() / Map::kSize;
+ ASSERT(live_map_objects_ == live_maps);
+
+ if (Heap::map_space()->NeedsCompaction(live_maps)) {
+ MapCompact map_compact(live_maps);
+
+ map_compact.CompactMaps();
+ map_compact.UpdateMapPointersInRoots();
+
+ map_compact.FinishMapSpace();
+ PagedSpaces spaces;
+ while (PagedSpace* space = spaces.next()) {
+ if (space == Heap::map_space()) continue;
+ map_compact.UpdateMapPointersInPagedSpace(space);
+ }
+ map_compact.UpdateMapPointersInNewSpace();
+ map_compact.UpdateMapPointersInLargeObjectSpace();
+
+ map_compact.Finish();
+ }
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 2da2b1f75..02aedb3ac 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -92,7 +92,15 @@ class MarkCompactCollector: public AllStatic {
static bool HasCompacted() { return compacting_collection_; }
// True after the Prepare phase if the compaction is taking place.
- static bool IsCompacting() { return compacting_collection_; }
+ static bool IsCompacting() {
+#ifdef DEBUG
+ // For the purposes of asserts we don't want this to keep returning true
+ // after the collection is completed.
+ return state_ != IDLE && compacting_collection_;
+#else
+ return compacting_collection_;
+#endif
+ }
// The count of the number of objects left marked at the end of the last
// completed full GC (expected to be zero).
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 07f729505..d804648f5 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -84,7 +84,7 @@ function MathCeil(x) {
// ECMA 262 - 15.8.2.7
function MathCos(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_cos(x);
+ return %Math_cos(x);
}
// ECMA 262 - 15.8.2.8
@@ -98,12 +98,12 @@ function MathFloor(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
// It's more common to call this with a positive number that's out
// of range than negative numbers; check the upper bound first.
- if (x <= 0x7FFFFFFF && x > 0) {
+ if (x < 0x80000000 && x > 0) {
// Numbers in the range [0, 2^31) can be floored by converting
// them to an unsigned 32-bit value using the shift operator.
// We avoid doing so for -0, because the result of Math.floor(-0)
// has to be -0, which wouldn't be the case with the shift.
- return x << 0;
+ return TO_UINT32(x);
} else {
return %Math_floor(x);
}
@@ -176,7 +176,7 @@ function MathRound(x) {
// ECMA 262 - 15.8.2.16
function MathSin(x) {
if (!IS_NUMBER(x)) x = ToNumber(x);
- return %_Math_sin(x);
+ return %Math_sin(x);
}
// ECMA 262 - 15.8.2.17
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index bdcbf918e..d3c8fcca4 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -178,7 +178,9 @@ function FormatMessage(message) {
result_not_primitive: "Result of %0 must be a primitive, was %1",
invalid_json: "String '%0' is not valid JSON",
circular_structure: "Converting circular structure to JSON",
- object_keys_non_object: "Object.keys called on non-object"
+ object_keys_non_object: "Object.keys called on non-object",
+ object_get_prototype_non_object: "Object.getPrototypeOf called on non-object",
+ array_indexof_not_defined: "Array.getIndexOf: Argument undefined"
};
}
var format = kMessages[message.type];
diff --git a/deps/v8/src/mksnapshot.cc b/deps/v8/src/mksnapshot.cc
index eb743f81f..b889e2e20 100644
--- a/deps/v8/src/mksnapshot.cc
+++ b/deps/v8/src/mksnapshot.cc
@@ -151,6 +151,7 @@ int main(int argc, char** argv) {
}
i::Serializer::Enable();
Persistent<Context> context = v8::Context::New();
+ ASSERT(!context.IsEmpty());
// Make sure all builtin scripts are cached.
{ HandleScope scope;
for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 36f65eee8..7e77e8164 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -587,7 +587,6 @@ static const char* TypeToString(InstanceType type) {
case JS_BUILTINS_OBJECT_TYPE: return "JS_BUILTINS_OBJECT";
case JS_GLOBAL_PROXY_TYPE: return "JS_GLOBAL_PROXY";
case PROXY_TYPE: return "PROXY";
- case SMI_TYPE: return "SMI";
#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return #NAME;
STRUCT_LIST(MAKE_STRUCT_CASE)
#undef MAKE_STRUCT_CASE
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 45690381f..300334246 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -150,8 +150,12 @@ bool Object::IsString() {
bool Object::IsSymbol() {
if (!this->IsHeapObject()) return false;
uint32_t type = HeapObject::cast(this)->map()->instance_type();
- return (type & (kIsNotStringMask | kIsSymbolMask)) ==
- (kStringTag | kSymbolTag);
+ // Because the symbol tag is non-zero and no non-string types have the
+ // symbol bit set, we can test for symbols with a very simple test
+ // operation.
+ ASSERT(kSymbolTag != 0);
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ return (type & kIsSymbolMask) != 0;
}
@@ -226,7 +230,8 @@ StringShape::StringShape(InstanceType t)
bool StringShape::IsSymbol() {
ASSERT(valid());
- return (type_ & kIsSymbolMask) == kSymbolTag;
+ ASSERT(kSymbolTag != 0);
+ return (type_ & kIsSymbolMask) != 0;
}
@@ -336,8 +341,8 @@ bool Object::IsExternalArray() {
return false;
InstanceType instance_type =
HeapObject::cast(this)->map()->instance_type();
- return (instance_type >= EXTERNAL_BYTE_ARRAY_TYPE &&
- instance_type <= EXTERNAL_FLOAT_ARRAY_TYPE);
+ return (instance_type >= FIRST_EXTERNAL_ARRAY_TYPE &&
+ instance_type <= LAST_EXTERNAL_ARRAY_TYPE);
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 52f1f9af6..118c4891d 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -6834,43 +6834,36 @@ void HashTable<Shape, Key>::IterateElements(ObjectVisitor* v) {
template<typename Shape, typename Key>
-Object* HashTable<Shape, Key>::Allocate(
- int at_least_space_for) {
+Object* HashTable<Shape, Key>::Allocate(int at_least_space_for) {
int capacity = RoundUpToPowerOf2(at_least_space_for);
- if (capacity < 4) capacity = 4; // Guarantee min capacity.
+ if (capacity < 4) {
+ capacity = 4; // Guarantee min capacity.
+ } else if (capacity > HashTable::kMaxCapacity) {
+ return Failure::OutOfMemoryException();
+ }
+
Object* obj = Heap::AllocateHashTable(EntryToIndex(capacity));
if (!obj->IsFailure()) {
HashTable::cast(obj)->SetNumberOfElements(0);
+ HashTable::cast(obj)->SetNumberOfDeletedElements(0);
HashTable::cast(obj)->SetCapacity(capacity);
}
return obj;
}
-
-// Find entry for key otherwise return -1.
+// Find entry for key; otherwise return kNotFound.
template<typename Shape, typename Key>
int HashTable<Shape, Key>::FindEntry(Key key) {
- uint32_t nof = NumberOfElements();
- if (nof == 0) return kNotFound; // Bail out if empty.
-
uint32_t capacity = Capacity();
- uint32_t hash = Shape::Hash(key);
- uint32_t entry = GetProbe(hash, 0, capacity);
-
- Object* element = KeyAt(entry);
- uint32_t passed_elements = 0;
- if (!element->IsNull()) {
- if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
- if (++passed_elements == nof) return kNotFound;
- }
- for (uint32_t i = 1; !element->IsUndefined(); i++) {
- entry = GetProbe(hash, i, capacity);
- element = KeyAt(entry);
- if (!element->IsNull()) {
- if (!element->IsUndefined() && Shape::IsMatch(key, element)) return entry;
- if (++passed_elements == nof) return kNotFound;
- }
+ uint32_t entry = FirstProbe(Shape::Hash(key), capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element->IsUndefined()) break; // Empty entry.
+ if (!element->IsNull() && Shape::IsMatch(key, element)) return entry;
+ entry = NextProbe(entry, count++, capacity);
}
return kNotFound;
}
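// Standalone sketch of the probe loop above (plain C++; FirstProbe and
// NextProbe are mirrored by hand, so treat the details as assumptions):
#include <vector>
enum SlotState { kEmpty, kDeleted, kOccupied };
struct Entry { SlotState state; int key; };
static int FindSlot(const std::vector<Entry>& table, int key, unsigned hash) {
  unsigned capacity = static_cast<unsigned>(table.size());  // Power of two.
  unsigned entry = hash & (capacity - 1);                    // FirstProbe.
  unsigned count = 1;
  while (true) {  // Never full, by the EnsureCapacity invariant.
    const Entry& e = table[entry];
    if (e.state == kEmpty) return -1;   // "undefined" slot ends the chain.
    if (e.state == kOccupied && e.key == key) return static_cast<int>(entry);
    // "null" (deleted) slots are skipped but keep the probe chain intact.
    entry = (entry + count++) & (capacity - 1);              // NextProbe.
  }
}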
@@ -6880,8 +6873,12 @@ template<typename Shape, typename Key>
Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
int capacity = Capacity();
int nof = NumberOfElements() + n;
- // Make sure 50% is free
- if (nof + (nof >> 1) <= capacity) return this;
+ int nod = NumberOfDeletedElements();
+ // Return if:
+ // 50% is still free after adding n elements and
+ // at most 50% of the free elements are deleted elements.
+ if ((nof + (nof >> 1) <= capacity) &&
+ (nod <= (capacity - nof) >> 1)) return this;
Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj;
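// Worked numbers for the grow condition above (my arithmetic): with
// capacity == 64 and nof == 40, nof + (nof >> 1) == 60 <= 64, so the
// table stays as-is only if nod <= (64 - 40) >> 1 == 12; deleted
// sentinels may fill at most half of the remaining free slots. With
// nod == 20 the table is reallocated via Allocate(nof * 2) and rehashed
// cleanly, which is also what resets the deleted-element count to zero.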
@@ -6908,21 +6905,23 @@ Object* HashTable<Shape, Key>::EnsureCapacity(int n, Key key) {
}
}
table->SetNumberOfElements(NumberOfElements());
+ table->SetNumberOfDeletedElements(0);
return table;
}
+
template<typename Shape, typename Key>
uint32_t HashTable<Shape, Key>::FindInsertionEntry(uint32_t hash) {
uint32_t capacity = Capacity();
- uint32_t entry = GetProbe(hash, 0, capacity);
- Object* element = KeyAt(entry);
-
- for (uint32_t i = 1; !(element->IsUndefined() || element->IsNull()); i++) {
- entry = GetProbe(hash, i, capacity);
- element = KeyAt(entry);
+ uint32_t entry = FirstProbe(hash, capacity);
+ uint32_t count = 1;
+ // EnsureCapacity will guarantee the hash table is never full.
+ while (true) {
+ Object* element = KeyAt(entry);
+ if (element->IsUndefined() || element->IsNull()) break;
+ entry = NextProbe(entry, count++, capacity);
}
-
return entry;
}
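
The rewritten EnsureCapacity check in the hunk above encodes two invariants: after adding the n elements, the free slots (capacity - nof) must amount to at least half of the live count, and at most half of those free slots may be tombstones left by ElementRemoved (a null key keeps probe chains intact, so an excess of them degrades lookups even when few live elements remain). A small sketch of the predicate with a few worked cases; the numbers are illustrative only:

    #include <cassert>

    // nof = live elements after adding n, nod = deleted (null-key) entries.
    static bool NeedsRehash(int nof, int nod, int capacity) {
      bool enough_free = nof + (nof >> 1) <= capacity;    // >= nof/2 slots free
      bool few_deleted = nod <= (capacity - nof) >> 1;    // tombstones bounded
      return !(enough_free && few_deleted);
    }

    int main() {
      assert(!NeedsRehash(10, 0, 16));   // 15 <= 16 and 0 <= 3: keep table
      assert(NeedsRehash(12, 0, 16));    // 12 + 6 > 16: grow
      assert(NeedsRehash(8, 5, 16));     // 5 tombstones > (16 - 8) / 2: rehash
      return 0;
    }
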
@@ -7001,6 +7000,10 @@ int Dictionary<NumberDictionaryShape, uint32_t>::NumberOfEnumElements();
template
int Dictionary<StringDictionaryShape, String*>::NumberOfEnumElements();
+template
+int HashTable<NumberDictionaryShape, uint32_t>::FindEntry(uint32_t);
+
+
// Collates undefined and nonexistent elements below limit from position
// zero of the elements. The object stays in Dictionary mode.
Object* JSObject::PrepareSlowElementsForSort(uint32_t limit) {
@@ -7703,7 +7706,7 @@ void NumberDictionary::RemoveNumberEntries(uint32_t from, uint32_t to) {
}
// Update the number of elements.
- SetNumberOfElements(NumberOfElements() - removed_entries);
+ ElementsRemoved(removed_entries);
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 03e2ca19b..5d088e523 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -204,14 +204,13 @@ enum PropertyNormalizationMode {
// instance_type is JS_OBJECT_TYPE.
//
// The names of the string instance types are intended to systematically
-// mirror their encoding in the instance_type field of the map. The length
-// (SHORT, MEDIUM, or LONG) is always mentioned. The default encoding is
-// considered TWO_BYTE. It is not mentioned in the name. ASCII encoding is
-// mentioned explicitly in the name. Likewise, the default representation is
-// considered sequential. It is not mentioned in the name. The other
-// representations (eg, CONS, EXTERNAL) are explicitly mentioned.
-// Finally, the string is either a SYMBOL_TYPE (if it is a symbol) or a
-// STRING_TYPE (if it is not a symbol).
+// mirror their encoding in the instance_type field of the map. The default
+// encoding is considered TWO_BYTE. It is not mentioned in the name. ASCII
+// encoding is mentioned explicitly in the name. Likewise, the default
+// representation is considered sequential. It is not mentioned in the
+// name. The other representations (eg, CONS, EXTERNAL) are explicitly
+// mentioned. Finally, the string is either a SYMBOL_TYPE (if it is a
+// symbol) or a STRING_TYPE (if it is not a symbol).
//
// NOTE: The following things depend on the string types having
// instance_types that are less than those of all other types:
@@ -237,11 +236,11 @@ enum PropertyNormalizationMode {
V(PRIVATE_EXTERNAL_ASCII_STRING_TYPE) \
\
V(MAP_TYPE) \
- V(HEAP_NUMBER_TYPE) \
- V(FIXED_ARRAY_TYPE) \
V(CODE_TYPE) \
V(JS_GLOBAL_PROPERTY_CELL_TYPE) \
V(ODDBALL_TYPE) \
+ \
+ V(HEAP_NUMBER_TYPE) \
V(PROXY_TYPE) \
V(BYTE_ARRAY_TYPE) \
V(PIXEL_ARRAY_TYPE) \
@@ -257,6 +256,7 @@ enum PropertyNormalizationMode {
V(EXTERNAL_FLOAT_ARRAY_TYPE) \
V(FILLER_TYPE) \
\
+ V(FIXED_ARRAY_TYPE) \
V(ACCESSOR_INFO_TYPE) \
V(ACCESS_CHECK_INFO_TYPE) \
V(INTERCEPTOR_INFO_TYPE) \
@@ -383,11 +383,12 @@ const uint32_t kIsNotStringMask = 0x80;
const uint32_t kStringTag = 0x0;
const uint32_t kNotStringTag = 0x80;
-// If bit 7 is clear, bit 5 indicates that the string is a symbol (if set) or
-// not (if cleared).
-const uint32_t kIsSymbolMask = 0x20;
+// Bit 6 indicates that the object is a symbol (if set) or not (if cleared).
+// There are few enough non-string types (with bit 7 set) that none of them
+// ever has bit 6 set as well.
+const uint32_t kIsSymbolMask = 0x40;
const uint32_t kNotSymbolTag = 0x0;
-const uint32_t kSymbolTag = 0x20;
+const uint32_t kSymbolTag = 0x40;
// If bit 7 is clear then bit 2 indicates whether the string consists of
// two-byte characters or one-byte characters.
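
Moving the symbol bit from 0x20 to 0x40 is what lets the IsSymbol checks earlier in this patch drop the comparison against kSymbolTag: with the constants above, no non-string instance type (bit 7 set) ever reaches a value with bit 6 set, which is exactly what ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE) verifies. A reduced sketch of the two tests, with the constants copied from the hunk above and everything else schematic:

    #include <cstdint>

    const uint32_t kIsNotStringMask = 0x80;
    const uint32_t kIsSymbolMask = 0x40;

    // Strings have bit 7 clear; symbols are the only types with bit 6 set.
    static bool IsString(uint32_t instance_type) {
      return (instance_type & kIsNotStringMask) == 0;
    }
    static bool IsSymbol(uint32_t instance_type) {
      return (instance_type & kIsSymbolMask) != 0;  // single AND, no compare
    }

    int main() {
      const uint32_t kSymbolType = 0x40;   // kSymbolTag | kSeqStringTag
      const uint32_t kStringType = 0x00;   // plain sequential two-byte string
      const uint32_t kMapType = 0x80;      // kNotStringTag
      return (IsSymbol(kSymbolType) && !IsSymbol(kStringType) &&
              !IsSymbol(kMapType) && !IsString(kMapType)) ? 0 : 1;
    }
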
@@ -418,6 +419,7 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
+ // String types.
SYMBOL_TYPE = kSymbolTag | kSeqStringTag,
ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kSymbolTag | kConsStringTag,
@@ -433,56 +435,66 @@ enum InstanceType {
EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
- MAP_TYPE = kNotStringTag,
- HEAP_NUMBER_TYPE,
- FIXED_ARRAY_TYPE,
+ // Objects allocated in their own spaces (never in new space).
+ MAP_TYPE = kNotStringTag, // FIRST_NONSTRING_TYPE
CODE_TYPE,
ODDBALL_TYPE,
JS_GLOBAL_PROPERTY_CELL_TYPE,
+
+ // "Data", objects that cannot contain non-map-word pointers to heap
+ // objects.
+ HEAP_NUMBER_TYPE,
PROXY_TYPE,
BYTE_ARRAY_TYPE,
PIXEL_ARRAY_TYPE,
- EXTERNAL_BYTE_ARRAY_TYPE,
+ EXTERNAL_BYTE_ARRAY_TYPE, // FIRST_EXTERNAL_ARRAY_TYPE
EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE,
EXTERNAL_SHORT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE,
EXTERNAL_INT_ARRAY_TYPE,
EXTERNAL_UNSIGNED_INT_ARRAY_TYPE,
- EXTERNAL_FLOAT_ARRAY_TYPE,
- FILLER_TYPE,
- SMI_TYPE,
+ EXTERNAL_FLOAT_ARRAY_TYPE, // LAST_EXTERNAL_ARRAY_TYPE
+ FILLER_TYPE, // LAST_DATA_TYPE
+ // Structs.
ACCESSOR_INFO_TYPE,
ACCESS_CHECK_INFO_TYPE,
INTERCEPTOR_INFO_TYPE,
- SHARED_FUNCTION_INFO_TYPE,
CALL_HANDLER_INFO_TYPE,
FUNCTION_TEMPLATE_INFO_TYPE,
OBJECT_TEMPLATE_INFO_TYPE,
SIGNATURE_INFO_TYPE,
TYPE_SWITCH_INFO_TYPE,
+ SCRIPT_TYPE,
#ifdef ENABLE_DEBUGGER_SUPPORT
DEBUG_INFO_TYPE,
BREAK_POINT_INFO_TYPE,
#endif
- SCRIPT_TYPE,
- JS_VALUE_TYPE,
+ FIXED_ARRAY_TYPE,
+ SHARED_FUNCTION_INFO_TYPE,
+
+ JS_VALUE_TYPE, // FIRST_JS_OBJECT_TYPE
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
- JS_REGEXP_TYPE,
+ JS_REGEXP_TYPE, // LAST_JS_OBJECT_TYPE
JS_FUNCTION_TYPE,
// Pseudo-types
- FIRST_NONSTRING_TYPE = MAP_TYPE,
FIRST_TYPE = 0x0,
- INVALID_TYPE = FIRST_TYPE - 1,
LAST_TYPE = JS_FUNCTION_TYPE,
+ INVALID_TYPE = FIRST_TYPE - 1,
+ FIRST_NONSTRING_TYPE = MAP_TYPE,
+ // Boundaries for testing for an external array.
+ FIRST_EXTERNAL_ARRAY_TYPE = EXTERNAL_BYTE_ARRAY_TYPE,
+ LAST_EXTERNAL_ARRAY_TYPE = EXTERNAL_FLOAT_ARRAY_TYPE,
+ // Boundary for promotion to old data space/old pointer space.
+ LAST_DATA_TYPE = FILLER_TYPE,
// Boundaries for testing the type is a JavaScript "object". Note that
// function objects are not counted as objects, even though they are
// implemented as such; only values whose typeof is "object" are included.
@@ -1497,6 +1509,10 @@ class JSObject: public HeapObject {
#endif
Object* SlowReverseLookup(Object* value);
+ // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
+ // Also maximal value of JSArray's length property.
+ static const uint32_t kMaxElementCount = 0xffffffffu;
+
static const uint32_t kMaxGap = 1024;
static const int kMaxFastElementsLength = 5000;
static const int kInitialMaxFastElementArray = 100000;
@@ -1623,8 +1639,14 @@ class FixedArray: public Array {
// Casting.
static inline FixedArray* cast(Object* obj);
- // Align data at kPointerSize, even if Array.kHeaderSize isn't aligned.
- static const int kHeaderSize = POINTER_SIZE_ALIGN(Array::kHeaderSize);
+ static const int kHeaderSize = Array::kAlignedSize;
+
+ // Maximal allowed size, in bytes, of a single FixedArray.
+ // Prevents overflowing size computations, as well as extreme memory
+ // consumption.
+ static const int kMaxSize = 512 * MB;
+ // Maximally allowed length of a FixedArray.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / kPointerSize;
// Dispatched behavior.
int FixedArraySize() { return SizeFor(length()); }
@@ -1875,6 +1897,11 @@ class HashTable: public FixedArray {
return Smi::cast(get(kNumberOfElementsIndex))->value();
}
+ // Returns the number of deleted elements in the hash table.
+ int NumberOfDeletedElements() {
+ return Smi::cast(get(kNumberOfDeletedElementsIndex))->value();
+ }
+
// Returns the capacity of the hash table.
int Capacity() {
return Smi::cast(get(kCapacityIndex))->value();
@@ -1886,8 +1913,14 @@ class HashTable: public FixedArray {
// ElementRemoved should be called whenever an element is removed from
// a hash table.
- void ElementRemoved() { SetNumberOfElements(NumberOfElements() - 1); }
- void ElementsRemoved(int n) { SetNumberOfElements(NumberOfElements() - n); }
+ void ElementRemoved() {
+ SetNumberOfElements(NumberOfElements() - 1);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + 1);
+ }
+ void ElementsRemoved(int n) {
+ SetNumberOfElements(NumberOfElements() - n);
+ SetNumberOfDeletedElements(NumberOfDeletedElements() + n);
+ }
// Returns a new HashTable object. Might return Failure.
static Object* Allocate(int at_least_space_for);
@@ -1914,17 +1947,24 @@ class HashTable: public FixedArray {
}
static const int kNumberOfElementsIndex = 0;
- static const int kCapacityIndex = 1;
- static const int kPrefixStartIndex = 2;
- static const int kElementsStartIndex =
+ static const int kNumberOfDeletedElementsIndex = 1;
+ static const int kCapacityIndex = 2;
+ static const int kPrefixStartIndex = 3;
+ static const int kElementsStartIndex =
kPrefixStartIndex + Shape::kPrefixSize;
- static const int kEntrySize = Shape::kEntrySize;
- static const int kElementsStartOffset =
+ static const int kEntrySize = Shape::kEntrySize;
+ static const int kElementsStartOffset =
kHeaderSize + kElementsStartIndex * kPointerSize;
// Constant used for denoting an absent entry.
static const int kNotFound = -1;
+ // Maximal capacity of HashTable. Based on maximal length of underlying
+ // FixedArray. Staying below kMaxCapacity also ensures that EntryToIndex
+ // cannot overflow.
+ static const int kMaxCapacity =
+ (FixedArray::kMaxLength - kElementsStartOffset) / kEntrySize;
+
// Find entry for key otherwise return kNotFound.
int FindEntry(Key key);
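
The capacity cap introduced here derives from the new FixedArray::kMaxLength earlier in this file. A worked instance of the arithmetic on a 32-bit layout; the header, element-start and entry sizes below are hypothetical shape parameters chosen for illustration, not the values of any particular V8 dictionary (requires C++11 for static_assert):

    // 32-bit worked example of the new size limits.
    const int MB = 1 << 20;
    const int kPointerSize = 4;
    const int kFixedArrayHeaderSize = 2 * kPointerSize;   // map + length words
    const int kMaxSize = 512 * MB;
    const int kMaxLength = (kMaxSize - kFixedArrayHeaderSize) / kPointerSize;

    const int kEntrySize = 3;              // e.g. key, value, details
    const int kElementsStartOffset = kFixedArrayHeaderSize + 5 * kPointerSize;
    const int kMaxCapacity = (kMaxLength - kElementsStartOffset) / kEntrySize;

    static_assert(kMaxLength == 134217726, "just under 2^27 pointers");
    static_assert(kMaxCapacity == 44739232, "EntryToIndex stays within int");

    int main() { return 0; }
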
@@ -1944,12 +1984,18 @@ class HashTable: public FixedArray {
fast_set(this, kNumberOfElementsIndex, Smi::FromInt(nof));
}
+ // Update the number of deleted elements in the hash table.
+ void SetNumberOfDeletedElements(int nod) {
+ fast_set(this, kNumberOfDeletedElementsIndex, Smi::FromInt(nod));
+ }
+
// Sets the capacity of the hash table.
void SetCapacity(int capacity) {
// To scale a computed hash code to fit within the hash table, we
// use bit-wise AND with a mask, so the capacity must be positive
// and non-zero.
ASSERT(capacity > 0);
+ ASSERT(capacity <= kMaxCapacity);
fast_set(this, kCapacityIndex, Smi::FromInt(capacity));
}
@@ -1960,6 +2006,14 @@ class HashTable: public FixedArray {
return (hash + GetProbeOffset(number)) & (size - 1);
}
+ static uint32_t FirstProbe(uint32_t hash, uint32_t size) {
+ return hash & (size - 1);
+ }
+
+ static uint32_t NextProbe(uint32_t last, uint32_t number, uint32_t size) {
+ return (last + number) & (size - 1);
+ }
+
// Ensure enough space for n additional elements.
Object* EnsureCapacity(int n, Key key);
};
@@ -2289,6 +2343,11 @@ class ByteArray: public Array {
static const int kHeaderSize = Array::kHeaderSize;
static const int kAlignedSize = Array::kAlignedSize;
+ // Maximal memory consumption for a single ByteArray.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single ByteArray.
+ static const int kMaxLength = kMaxSize - kHeaderSize;
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(ByteArray);
};
@@ -3575,6 +3634,14 @@ class JSRegExp: public JSObject {
static const int kIrregexpCaptureCountIndex = kDataIndex + 3;
static const int kIrregexpDataSize = kIrregexpCaptureCountIndex + 1;
+
+ // Offsets directly into the data fixed array.
+ static const int kDataTagOffset =
+ FixedArray::kHeaderSize + kTagIndex * kPointerSize;
+ static const int kDataAsciiCodeOffset =
+ FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+ static const int kIrregexpCaptureCountOffset =
+ FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
};
@@ -3998,6 +4065,12 @@ class SeqAsciiString: public SeqString {
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+ // Maximal memory usage for a single sequential ASCII string.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single sequential ASCII string.
+ // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
+ static const int kMaxLength = (kMaxSize - kHeaderSize);
+
// Support for StringInputBuffer.
inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset,
@@ -4044,6 +4117,12 @@ class SeqTwoByteString: public SeqString {
static const int kHeaderSize = String::kSize;
static const int kAlignedSize = POINTER_SIZE_ALIGN(kHeaderSize);
+ // Maximal memory usage for a single sequential two-byte string.
+ static const int kMaxSize = 512 * MB;
+ // Maximal length of a single sequential two-byte string.
+ // Q.v. String::kMaxLength which is the maximal size of concatenated strings.
+ static const int kMaxLength = (kMaxSize - kHeaderSize) / sizeof(uint16_t);
+
// Support for StringInputBuffer.
inline void SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset_ptr,
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index a3bc6dab2..4090a080f 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -371,7 +371,7 @@ class RegExpBuilder: public ZoneObject {
void AddAtom(RegExpTree* tree);
void AddAssertion(RegExpTree* tree);
void NewAlternative(); // '|'
- void AddQuantifierToAtom(int min, int max, bool is_greedy);
+ void AddQuantifierToAtom(int min, int max, RegExpQuantifier::Type type);
RegExpTree* ToRegExp();
private:
void FlushCharacters();
@@ -503,7 +503,9 @@ RegExpTree* RegExpBuilder::ToRegExp() {
}
-void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
+void RegExpBuilder::AddQuantifierToAtom(int min,
+ int max,
+ RegExpQuantifier::Type type) {
if (pending_empty_) {
pending_empty_ = false;
return;
@@ -543,7 +545,7 @@ void RegExpBuilder::AddQuantifierToAtom(int min, int max, bool is_greedy) {
UNREACHABLE();
return;
}
- terms_.Add(new RegExpQuantifier(min, max, is_greedy, atom));
+ terms_.Add(new RegExpQuantifier(min, max, type, atom));
LAST(ADD_TERM);
}
@@ -3332,7 +3334,7 @@ Handle<FixedArray> CompileTimeValue::GetValue(Expression* expression) {
ArrayLiteral* array_literal = expression->AsArrayLiteral();
ASSERT(array_literal != NULL && array_literal->is_simple());
result->set(kTypeSlot, Smi::FromInt(ARRAY_LITERAL));
- result->set(kElementsSlot, *array_literal->literals());
+ result->set(kElementsSlot, *array_literal->constant_elements());
}
return result;
}
@@ -3596,7 +3598,7 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> var_name,
top_scope_->NewUnresolved(function_name, inside_with());
fproxy->BindTo(fvar);
body.Add(new ExpressionStatement(
- new Assignment(Token::INIT_VAR, fproxy,
+ new Assignment(Token::INIT_CONST, fproxy,
NEW(ThisFunction()),
RelocInfo::kNoPosition)));
}
@@ -4278,12 +4280,16 @@ RegExpTree* RegExpParser::ParseDisjunction() {
default:
continue;
}
- bool is_greedy = true;
+ RegExpQuantifier::Type type = RegExpQuantifier::GREEDY;
if (current() == '?') {
- is_greedy = false;
+ type = RegExpQuantifier::NON_GREEDY;
+ Advance();
+ } else if (FLAG_regexp_possessive_quantifier && current() == '+') {
+ // FLAG_regexp_possessive_quantifier is a debug-only flag.
+ type = RegExpQuantifier::POSSESSIVE;
Advance();
}
- builder->AddQuantifierToAtom(min, max, is_greedy);
+ builder->AddQuantifierToAtom(min, max, type);
}
}
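
For reference, the three quantifier modes now distinguished: greedy (/a+/) matches as much as possible and backtracks on failure, non-greedy (/a+?/) matches as little as possible, and possessive (/a++/, only parsed when the debug flag mentioned above is set) matches greedily but never gives anything back. A sketch of the suffix dispatch the parser performs, with the Advance()/current() machinery stubbed out:

    enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };

    // 'suffix' is the character after the quantifier, e.g. the '?' in /a+?/.
    static QuantifierType ClassifySuffix(char suffix, bool possessive_enabled) {
      if (suffix == '?') return NON_GREEDY;
      if (possessive_enabled && suffix == '+') return POSSESSIVE;
      return GREEDY;
    }

    int main() {
      return (ClassifySuffix('?', false) == NON_GREEDY &&
              ClassifySuffix('+', true) == POSSESSIVE &&
              ClassifySuffix('b', false) == GREEDY) ? 0 : 1;
    }
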
@@ -4705,6 +4711,11 @@ unsigned* ScriptDataImpl::Data() {
}
+bool ScriptDataImpl::HasError() {
+ return has_error();
+}
+
+
ScriptDataImpl* PreParse(Handle<String> source,
unibrow::CharacterStream* stream,
v8::Extension* extension) {
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 7328e8119..a67284c28 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -91,6 +91,7 @@ class ScriptDataImpl : public ScriptData {
virtual ~ScriptDataImpl();
virtual int Length();
virtual unsigned* Data();
+ virtual bool HasError();
FunctionEntry GetFunctionEnd(int start);
bool SanityCheck();
diff --git a/deps/v8/src/regexp-delay.js b/deps/v8/src/regexp-delay.js
index 14c364457..7bec455d3 100644
--- a/deps/v8/src/regexp-delay.js
+++ b/deps/v8/src/regexp-delay.js
@@ -136,13 +136,7 @@ function CompileRegExp(pattern, flags) {
function DoRegExpExec(regexp, string, index) {
- return %RegExpExec(regexp, string, index, lastMatchInfo);
-}
-
-
-function DoRegExpExecGlobal(regexp, string) {
- // Returns an array of arrays of substring indices.
- return %RegExpExecGlobal(regexp, string, lastMatchInfo);
+ return %_RegExpExec(regexp, string, index, lastMatchInfo);
}
@@ -170,7 +164,7 @@ function RegExpExec(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
@@ -227,7 +221,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [this, s, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
- var matchIndices = %RegExpExec(this, s, i, lastMatchInfo);
+ var matchIndices = %_RegExpExec(this, s, i, lastMatchInfo);
if (matchIndices == null) {
if (this.global) this.lastIndex = 0;
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.cc b/deps/v8/src/regexp-macro-assembler-tracer.cc
index 0aad33737..c5c2919c3 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.cc
+++ b/deps/v8/src/regexp-macro-assembler-tracer.cc
@@ -307,18 +307,11 @@ void RegExpMacroAssemblerTracer::CheckCharacters(Vector<const uc16> str,
bool RegExpMacroAssemblerTracer::CheckSpecialCharacterClass(
uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
bool supported = assembler_->CheckSpecialCharacterClass(type,
- cp_offset,
- check_offset,
on_no_match);
- PrintF(" CheckSpecialCharacterClass(type='%c', offset=%d, "
- "check_offset=%s, label[%08x]): %s;\n",
+ PrintF(" CheckSpecialCharacterClass(type='%c', label[%08x]): %s;\n",
type,
- cp_offset,
- check_offset ? "true" : "false",
on_no_match,
supported ? "true" : "false");
return supported;
diff --git a/deps/v8/src/regexp-macro-assembler-tracer.h b/deps/v8/src/regexp-macro-assembler-tracer.h
index 28ca5f340..9608f9e16 100644
--- a/deps/v8/src/regexp-macro-assembler-tracer.h
+++ b/deps/v8/src/regexp-macro-assembler-tracer.h
@@ -69,8 +69,6 @@ class RegExpMacroAssemblerTracer: public RegExpMacroAssembler {
uc16 and_with,
Label* on_not_equal);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index 9ae19d723..c73e02a8e 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -143,17 +143,6 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
input_end,
offsets_vector,
previous_index == 0);
-
- if (res == SUCCESS) {
- // Capture values are relative to start_offset only.
- // Convert them to be relative to start of string.
- for (int i = 0; i < offsets_vector_length; i++) {
- if (offsets_vector[i] >= 0) {
- offsets_vector[i] += previous_index;
- }
- }
- }
-
return res;
}
@@ -167,7 +156,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
int* output,
bool at_start) {
typedef int (*matcher)(String*, int, const byte*,
- const byte*, int*, int, Address);
+ const byte*, int*, int, Address, int);
matcher matcher_func = FUNCTION_CAST<matcher>(code->entry());
int at_start_val = at_start ? 1 : 0;
@@ -176,6 +165,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
RegExpStack stack;
Address stack_base = RegExpStack::stack_base();
+ int direct_call = 0;
int result = CALL_GENERATED_REGEXP_CODE(matcher_func,
input,
start_offset,
@@ -183,7 +173,8 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Execute(
input_end,
output,
at_start_val,
- stack_base);
+ stack_base,
+ direct_call);
ASSERT(result <= SUCCESS);
ASSERT(result >= RETRY);
diff --git a/deps/v8/src/regexp-macro-assembler.h b/deps/v8/src/regexp-macro-assembler.h
index aa01096d2..7cc95110e 100644
--- a/deps/v8/src/regexp-macro-assembler.h
+++ b/deps/v8/src/regexp-macro-assembler.h
@@ -123,8 +123,6 @@ class RegExpMacroAssembler {
// not have custom support.
// May clobber the currently loaded character.
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
return false;
}
diff --git a/deps/v8/src/regexp-stack.h b/deps/v8/src/regexp-stack.h
index fbaa6fbb5..b4fa2e920 100644
--- a/deps/v8/src/regexp-stack.h
+++ b/deps/v8/src/regexp-stack.h
@@ -98,12 +98,24 @@ class RegExpStack {
void Free();
};
+ // Address of allocated memory.
+ static Address memory_address() {
+ return reinterpret_cast<Address>(&thread_local_.memory_);
+ }
+
+ // Address of size of allocated memory.
+ static Address memory_size_address() {
+ return reinterpret_cast<Address>(&thread_local_.memory_size_);
+ }
+
// Resets the buffer if it has grown beyond the default/minimum size.
// After this, the buffer is either the default size, or it is empty, so
// you have to call EnsureCapacity before using it again.
static void Reset();
static ThreadLocal thread_local_;
+
+ friend class ExternalReference;
};
}} // namespace v8::internal
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 834983344..3a6e176cb 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -559,6 +559,73 @@ static Object* Runtime_IsConstructCall(Arguments args) {
}
+// Recursively traverses hidden prototypes if property is not found
+static void GetOwnPropertyImplementation(JSObject* obj,
+ String* name,
+ LookupResult* result) {
+ obj->LocalLookupRealNamedProperty(name, result);
+
+ if (!result->IsProperty()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype())
+ GetOwnPropertyImplementation(JSObject::cast(proto),
+ name, result);
+ }
+}
+
+
+// Returns an array with the property description:
+// if args[1] is not a property on args[0]
+// returns undefined
+// if args[1] is a data property on args[0]
+// [false, value, Writeable, Enumerable, Configurable]
+//         [false, value, Writable, Enumerable, Configurable]
+// [true, GetFunction, SetFunction, Enumerable, Configurable]
+static Object* Runtime_GetOwnProperty(Arguments args) {
+ HandleScope scope;
+ Handle<FixedArray> elms = Factory::NewFixedArray(5);
+ Handle<JSArray> desc = Factory::NewJSArrayWithElements(elms);
+ LookupResult result;
+ CONVERT_CHECKED(JSObject, obj, args[0]);
+ CONVERT_CHECKED(String, name, args[1]);
+
+ // Use recursive implementation to also traverse hidden prototypes
+ GetOwnPropertyImplementation(obj, name, &result);
+
+ if (!result.IsProperty())
+ return Heap::undefined_value();
+
+ if (result.type() == CALLBACKS) {
+ Object* structure = result.GetCallbackObject();
+ if (structure->IsProxy()) {
+ // Property that is internally implemented as a callback.
+ Object* value = obj->GetPropertyWithCallback(
+ obj, structure, name, result.holder());
+ elms->set(0, Heap::false_value());
+ elms->set(1, value);
+ elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ } else if (structure->IsFixedArray()) {
+ // __defineGetter__/__defineSetter__ callback.
+ elms->set(0, Heap::true_value());
+ elms->set(1, FixedArray::cast(structure)->get(0));
+ elms->set(2, FixedArray::cast(structure)->get(1));
+ } else {
+ // TODO(ricow): Handle API callbacks.
+ return Heap::undefined_value();
+ }
+ } else {
+ elms->set(0, Heap::false_value());
+ elms->set(1, result.GetLazyValue());
+ elms->set(2, Heap::ToBoolean(!result.IsReadOnly()));
+ }
+
+ elms->set(3, Heap::ToBoolean(!result.IsDontEnum()));
+ elms->set(4, Heap::ToBoolean(!result.IsReadOnly()));
+ return *desc;
+}
+
+
static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope;
ASSERT(args.length() == 3);
@@ -1158,6 +1225,7 @@ static Object* Runtime_RegExpExec(Arguments args) {
RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index >= 0);
RUNTIME_ASSERT(index <= subject->length());
+ Counters::regexp_entry_runtime.Increment();
Handle<Object> result = RegExpImpl::Exec(regexp,
subject,
index,
@@ -1384,6 +1452,17 @@ static Object* CharCodeAt(String* subject, Object* index) {
}
+static Object* CharFromCode(Object* char_code) {
+ uint32_t code;
+ if (Array::IndexFromObject(char_code, &code)) {
+ if (code <= 0xffff) {
+ return Heap::LookupSingleCharacterStringFromCode(code);
+ }
+ }
+ return Heap::empty_string();
+}
+
+
static Object* Runtime_StringCharCodeAt(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -1394,16 +1473,20 @@ static Object* Runtime_StringCharCodeAt(Arguments args) {
}
+static Object* Runtime_StringCharAt(Arguments args) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 2);
+
+ CONVERT_CHECKED(String, subject, args[0]);
+ Object* index = args[1];
+ return CharFromCode(CharCodeAt(subject, index));
+}
+
+
static Object* Runtime_CharFromCode(Arguments args) {
NoHandleAllocation ha;
ASSERT(args.length() == 1);
- uint32_t code;
- if (Array::IndexFromObject(args[0], &code)) {
- if (code <= 0xffff) {
- return Heap::LookupSingleCharacterStringFromCode(code);
- }
- }
- return Heap::empty_string();
+ return CharFromCode(args[0]);
}
// Forward declarations.
@@ -1509,7 +1592,7 @@ class ReplacementStringBuilder {
void IncrementCharacterCount(int by) {
- if (character_count_ > Smi::kMaxValue - by) {
+ if (character_count_ > String::kMaxLength - by) {
V8::FatalProcessOutOfMemory("String.replace result too large.");
}
character_count_ += by;
@@ -2473,6 +2556,7 @@ static Object* Runtime_SubString(Arguments args) {
RUNTIME_ASSERT(end >= start);
RUNTIME_ASSERT(start >= 0);
RUNTIME_ASSERT(end <= value->length());
+ Counters::sub_string_runtime.Increment();
return value->SubString(start, end);
}
@@ -2724,7 +2808,6 @@ static Object* Runtime_GetProperty(Arguments args) {
}
-
// KeyedStringGetProperty is called from KeyedLoadIC::GenerateGeneric.
static Object* Runtime_KeyedGetProperty(Arguments args) {
NoHandleAllocation ha;
@@ -2776,6 +2859,13 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
// If value is the hole do the general lookup.
}
}
+ } else if (args[0]->IsString() && args[1]->IsSmi()) {
+ // Fast case for string indexing using [] with a smi index.
+ HandleScope scope;
+ Handle<String> str = args.at<String>(0);
+ int index = Smi::cast(args[1])->value();
+ Handle<Object> result = GetCharAt(str, index);
+ return *result;
}
// Fall back to GetObjectProperty.
@@ -3362,6 +3452,7 @@ static Object* Runtime_URIEscape(Arguments args) {
escaped_length += 3;
}
// We don't allow strings that are longer than a maximal length.
+ ASSERT(String::kMaxLength < 0x7fffffff - 6); // Cannot overflow.
if (escaped_length > String::kMaxLength) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
@@ -3908,20 +3999,19 @@ static inline void StringBuilderConcatHelper(String* special,
static Object* Runtime_StringBuilderConcat(Arguments args) {
NoHandleAllocation ha;
- ASSERT(args.length() == 2);
+ ASSERT(args.length() == 3);
CONVERT_CHECKED(JSArray, array, args[0]);
- CONVERT_CHECKED(String, special, args[1]);
+ if (!args[1]->IsSmi()) {
+ Top::context()->mark_out_of_memory();
+ return Failure::OutOfMemoryException();
+ }
+ int array_length = Smi::cast(args[1])->value();
+ CONVERT_CHECKED(String, special, args[2]);
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
int special_length = special->length();
- Object* smi_array_length = array->length();
- if (!smi_array_length->IsSmi()) {
- Top::context()->mark_out_of_memory();
- return Failure::OutOfMemoryException();
- }
- int array_length = Smi::cast(smi_array_length)->value();
if (!array->HasFastElements()) {
return Top::Throw(Heap::illegal_argument_symbol());
}
@@ -3939,6 +4029,7 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
bool ascii = special->IsAsciiRepresentation();
int position = 0;
+ int increment = 0;
for (int i = 0; i < array_length; i++) {
Object* elt = fixed_array->get(i);
if (elt->IsSmi()) {
@@ -3951,10 +4042,10 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
if (pos + len > special_length) {
return Top::Throw(Heap::illegal_argument_symbol());
}
- position += len;
+ increment = len;
} else {
// Position and length encoded in two smis.
- position += (-len);
+ increment = (-len);
// Get the position and check that it is also a smi.
i++;
if (i >= array_length) {
@@ -3968,17 +4059,18 @@ static Object* Runtime_StringBuilderConcat(Arguments args) {
} else if (elt->IsString()) {
String* element = String::cast(elt);
int element_length = element->length();
- position += element_length;
+ increment = element_length;
if (ascii && !element->IsAsciiRepresentation()) {
ascii = false;
}
} else {
return Top::Throw(Heap::illegal_argument_symbol());
}
- if (position > String::kMaxLength) {
+ if (increment > String::kMaxLength - position) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
+ position += increment;
}
int length = position;
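
The reordered length check is the interesting part of this hunk: the old code tested position > String::kMaxLength after the addition, by which point position += increment may already have overflowed a signed int; testing increment > String::kMaxLength - position before adding cannot overflow, since both sides stay within [0, kMaxLength]. The same pattern recurs below for element counts. A reduced sketch, using a stand-in bound rather than V8's actual String::kMaxLength:

    // Overflow-safe accumulation: check the headroom, then add.
    const int kMaxLength = (1 << 29) - 1;   // hypothetical bound

    static bool TryAdd(int* position, int increment) {
      if (increment > kMaxLength - *position) return false;  // would overflow
      *position += increment;                                // now safe
      return true;
    }

    int main() {
      int position = kMaxLength - 1;
      return (TryAdd(&position, 1) && !TryAdd(&position, 1)) ? 0 : 1;
    }
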
@@ -4192,6 +4284,8 @@ static Object* Runtime_StringCompare(Arguments args) {
CONVERT_CHECKED(String, x, args[0]);
CONVERT_CHECKED(String, y, args[1]);
+ Counters::string_compare_runtime.Increment();
+
// A few fast case tests before we flatten.
if (x == y) return Smi::FromInt(EQUAL);
if (y->length() == 0) {
@@ -5227,51 +5321,31 @@ static Object* Runtime_CompileString(Arguments args) {
}
-static Handle<JSFunction> GetBuiltinFunction(String* name) {
- LookupResult result;
- Top::global_context()->builtins()->LocalLookup(name, &result);
- return Handle<JSFunction>(JSFunction::cast(result.GetValue()));
-}
-
+static ObjectPair Runtime_ResolvePossiblyDirectEval(Arguments args) {
+ ASSERT(args.length() == 3);
+ if (!args[0]->IsJSFunction()) {
+ return MakePair(Top::ThrowIllegalOperation(), NULL);
+ }
-static Object* CompileDirectEval(Handle<String> source) {
- // Compute the eval context.
HandleScope scope;
+ Handle<JSFunction> callee = args.at<JSFunction>(0);
+ Handle<Object> receiver; // Will be overwritten.
+
+ // Compute the calling context.
+ Handle<Context> context = Handle<Context>(Top::context());
+#ifdef DEBUG
+ // Make sure Top::context() agrees with the old code that traversed
+ // the stack frames to compute the context.
StackFrameLocator locator;
JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- Handle<Context> context(Context::cast(frame->context()));
- bool is_global = context->IsGlobalContext();
-
- // Compile source string in the current context.
- Handle<JSFunction> boilerplate = Compiler::CompileEval(
- source,
- context,
- is_global,
- Compiler::DONT_VALIDATE_JSON);
- if (boilerplate.is_null()) return Failure::Exception();
- Handle<JSFunction> fun =
- Factory::NewFunctionFromBoilerplate(boilerplate, context, NOT_TENURED);
- return *fun;
-}
-
-
-static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
- ASSERT(args.length() == 2);
-
- HandleScope scope;
-
- CONVERT_ARG_CHECKED(JSFunction, callee, 0);
-
- Handle<Object> receiver;
+ ASSERT(Context::cast(frame->context()) == *context);
+#endif
// Find where the 'eval' symbol is bound. It is unaliased only if
// it is bound in the global context.
- StackFrameLocator locator;
- JavaScriptFrame* frame = locator.FindJavaScriptFrame(0);
- Handle<Context> context(Context::cast(frame->context()));
- int index;
- PropertyAttributes attributes;
- while (!context.is_null()) {
+ int index = -1;
+ PropertyAttributes attributes = ABSENT;
+ while (true) {
receiver = context->Lookup(Factory::eval_symbol(), FOLLOW_PROTOTYPE_CHAIN,
&index, &attributes);
// Stop search when eval is found or when the global context is
@@ -5290,46 +5364,42 @@ static Object* Runtime_ResolvePossiblyDirectEval(Arguments args) {
Handle<Object> name = Factory::eval_symbol();
Handle<Object> reference_error =
Factory::NewReferenceError("not_defined", HandleVector(&name, 1));
- return Top::Throw(*reference_error);
- }
-
- if (context->IsGlobalContext()) {
- // 'eval' is bound in the global context, but it may have been overwritten.
- // Compare it to the builtin 'GlobalEval' function to make sure.
- Handle<JSFunction> global_eval =
- GetBuiltinFunction(Heap::global_eval_symbol());
- if (global_eval.is_identical_to(callee)) {
- // A direct eval call.
- if (args[1]->IsString()) {
- CONVERT_ARG_CHECKED(String, source, 1);
- // A normal eval call on a string. Compile it and return the
- // compiled function bound in the local context.
- Object* compiled_source = CompileDirectEval(source);
- if (compiled_source->IsFailure()) return compiled_source;
- receiver = Handle<Object>(frame->receiver());
- callee = Handle<JSFunction>(JSFunction::cast(compiled_source));
- } else {
- // An eval call that is not called on a string. Global eval
- // deals better with this.
- receiver = Handle<Object>(Top::global_context()->global());
- }
- } else {
- // 'eval' is overwritten. Just call the function with the given arguments.
- receiver = Handle<Object>(Top::global_context()->global());
- }
- } else {
+ return MakePair(Top::Throw(*reference_error), NULL);
+ }
+
+ if (!context->IsGlobalContext()) {
// 'eval' is not bound in the global context. Just call the function
// with the given arguments. This is not necessarily the global eval.
if (receiver->IsContext()) {
context = Handle<Context>::cast(receiver);
receiver = Handle<Object>(context->get(index));
+ } else if (receiver->IsJSContextExtensionObject()) {
+ receiver = Handle<JSObject>(Top::context()->global()->global_receiver());
}
+ return MakePair(*callee, *receiver);
}
- Handle<FixedArray> call = Factory::NewFixedArray(2);
- call->set(0, *callee);
- call->set(1, *receiver);
- return *call;
+ // 'eval' is bound in the global context, but it may have been overwritten.
+ // Compare it to the builtin 'GlobalEval' function to make sure.
+ if (*callee != Top::global_context()->global_eval_fun() ||
+ !args[1]->IsString()) {
+ return MakePair(*callee, Top::context()->global()->global_receiver());
+ }
+
+ // Deal with a normal eval call with a string argument. Compile it
+ // and return the compiled function bound in the local context.
+ Handle<String> source = args.at<String>(1);
+ Handle<JSFunction> boilerplate = Compiler::CompileEval(
+ source,
+ Handle<Context>(Top::context()),
+ Top::context()->IsGlobalContext(),
+ Compiler::DONT_VALIDATE_JSON);
+ if (boilerplate.is_null()) return MakePair(Failure::Exception(), NULL);
+ callee = Factory::NewFunctionFromBoilerplate(
+ boilerplate,
+ Handle<Context>(Top::context()),
+ NOT_TENURED);
+ return MakePair(*callee, args[2]);
}
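
Returning ObjectPair via MakePair lets the resolver hand back callee and receiver in one call, where the old code allocated a two-element FixedArray on every eval resolution. A minimal sketch of that style of two-value return; the struct layout here is illustrative, not V8's actual ABI (which packs the pair for the generated-code caller):

    #include <cassert>

    // Illustrative two-pointer return in the spirit of ObjectPair/MakePair:
    // the caller gets callee and receiver back without a heap allocation.
    struct ObjectPair { void* x; void* y; };

    static ObjectPair MakePair(void* x, void* y) {
      ObjectPair result = {x, y};
      return result;
    }

    int main() {
      int callee = 0, receiver = 0;
      ObjectPair p = MakePair(&callee, &receiver);
      assert(p.x == &callee && p.y == &receiver);
      return 0;
    }
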
@@ -5386,11 +5456,11 @@ class ArrayConcatVisitor {
uint32_t index_limit,
bool fast_elements) :
storage_(storage), index_limit_(index_limit),
- fast_elements_(fast_elements), index_offset_(0) { }
+ index_offset_(0), fast_elements_(fast_elements) { }
void visit(uint32_t i, Handle<Object> elm) {
- uint32_t index = i + index_offset_;
- if (index >= index_limit_) return;
+ if (i >= index_limit_ - index_offset_) return;
+ uint32_t index = index_offset_ + i;
if (fast_elements_) {
ASSERT(index < static_cast<uint32_t>(storage_->length()));
@@ -5406,14 +5476,23 @@ class ArrayConcatVisitor {
}
void increase_index_offset(uint32_t delta) {
- index_offset_ += delta;
+ if (index_limit_ - index_offset_ < delta) {
+ index_offset_ = index_limit_;
+ } else {
+ index_offset_ += delta;
+ }
}
+ Handle<FixedArray> storage() { return storage_; }
+
private:
Handle<FixedArray> storage_;
+ // Limit on the accepted indices. Elements with indices larger than the
+ // limit are ignored by the visitor.
uint32_t index_limit_;
- bool fast_elements_;
+ // Index after last seen index. Always less than or equal to index_limit_.
uint32_t index_offset_;
+ bool fast_elements_;
};
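
The visitor changes apply the same discipline to unsigned arithmetic: i >= index_limit_ - index_offset_ is well-defined because the class keeps index_offset_ <= index_limit_, whereas the old i + index_offset_ could wrap around uint32_t and slip past the limit. Schematically:

    #include <cstdint>

    // Guard via subtraction; requires offset <= limit as a class invariant.
    static bool AcceptIndex(uint32_t i, uint32_t offset, uint32_t limit) {
      return i < limit - offset;   // same as offset + i < limit, without wrap
    }

    int main() {
      const uint32_t kLimit = 0xffffffffu;
      // With the old formulation, i + offset would wrap to 0 and be accepted.
      return AcceptIndex(0xffffffffu, 1u, kLimit) ? 1 : 0;
    }
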
@@ -5585,6 +5664,11 @@ static uint32_t IterateElements(Handle<JSObject> receiver,
*
* If a ArrayConcatVisitor object is given, the visitor is called with
* parameters, element's index + visitor_index_offset and the element.
+ *
+ * The returned number of elements is an upper bound on the actual number
+ * of elements added. If the same element occurs in more than one object
+ * in the array's prototype chain, it will be counted more than once, but
+ * will only occur once in the result.
*/
static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
ArrayConcatVisitor* visitor) {
@@ -5607,8 +5691,14 @@ static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
uint32_t nof_elements = 0;
for (int i = objects.length() - 1; i >= 0; i--) {
Handle<JSObject> obj = objects[i];
- nof_elements +=
+ uint32_t encountered_elements =
IterateElements(Handle<JSObject>::cast(obj), range, visitor);
+
+ if (encountered_elements > JSObject::kMaxElementCount - nof_elements) {
+ nof_elements = JSObject::kMaxElementCount;
+ } else {
+ nof_elements += encountered_elements;
+ }
}
return nof_elements;
@@ -5625,10 +5715,12 @@ static uint32_t IterateArrayAndPrototypeElements(Handle<JSArray> array,
* elements. If an argument is not an Array object, the function
* visits the object as if it is an one-element array.
*
- * If the result array index overflows 32-bit integer, the rounded
+ * If the result array index overflows a 32-bit unsigned integer, the wrapped
 * non-negative number is used as the new length. For example, if one
 * array's length is 2^32 - 1 and another array's length is 1, the
 * concatenated array's length is 0.
+ * TODO(lrn) Change length behavior to the ECMAScript 5 specification (the
+ * length is one more than the last array index that was assigned a value).
*/
static uint32_t IterateArguments(Handle<JSArray> arguments,
ArrayConcatVisitor* visitor) {
@@ -5644,16 +5736,23 @@ static uint32_t IterateArguments(Handle<JSArray> arguments,
IterateArrayAndPrototypeElements(array, visitor);
// Total elements of array and its prototype chain can be more than
// the array length, but ArrayConcat can only concatenate at most
- // the array length number of elements.
- visited_elements += (nof_elements > len) ? len : nof_elements;
+ // the array length number of elements. We use the length as an estimate
+ // for the actual number of elements added.
+ uint32_t added_elements = (nof_elements > len) ? len : nof_elements;
+ if (JSArray::kMaxElementCount - visited_elements < added_elements) {
+ visited_elements = JSArray::kMaxElementCount;
+ } else {
+ visited_elements += added_elements;
+ }
if (visitor) visitor->increase_index_offset(len);
-
} else {
if (visitor) {
visitor->visit(0, obj);
visitor->increase_index_offset(1);
}
- visited_elements++;
+ if (visited_elements < JSArray::kMaxElementCount) {
+ visited_elements++;
+ }
}
}
return visited_elements;
@@ -5663,6 +5762,8 @@ static uint32_t IterateArguments(Handle<JSArray> arguments,
/**
* Array::concat implementation.
* See ECMAScript 262, 15.4.4.4.
+ * TODO(lrn): Fix non-compliance for very large concatenations and update to
+ * follow the ECMAScript 5 specification.
*/
static Object* Runtime_ArrayConcat(Arguments args) {
ASSERT(args.length() == 1);
@@ -5679,12 +5780,18 @@ static Object* Runtime_ArrayConcat(Arguments args) {
{ AssertNoAllocation nogc;
for (uint32_t i = 0; i < num_of_args; i++) {
Object* obj = arguments->GetElement(i);
+ uint32_t length_estimate;
if (obj->IsJSArray()) {
- result_length +=
+ length_estimate =
static_cast<uint32_t>(JSArray::cast(obj)->length()->Number());
} else {
- result_length++;
+ length_estimate = 1;
+ }
+ if (JSObject::kMaxElementCount - result_length < length_estimate) {
+ result_length = JSObject::kMaxElementCount;
+ break;
}
+ result_length += length_estimate;
}
}
@@ -5718,7 +5825,8 @@ static Object* Runtime_ArrayConcat(Arguments args) {
IterateArguments(arguments, &visitor);
result->set_length(*len);
- result->set_elements(*storage);
+ // Please note the storage might have changed in the visitor.
+ result->set_elements(*visitor.storage());
return *result;
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index f13c42433..b6542a613 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -61,6 +61,8 @@ namespace internal {
\
F(IsConstructCall, 0, 1) \
\
+ F(GetOwnProperty, 2, 1) \
+ \
/* Utilities */ \
F(GetCalledFunction, 0, 1) \
F(GetFunctionDelegate, 1, 1) \
@@ -103,7 +105,7 @@ namespace internal {
F(NumberUnaryMinus, 1, 1) \
\
F(StringAdd, 2, 1) \
- F(StringBuilderConcat, 2, 1) \
+ F(StringBuilderConcat, 3, 1) \
\
/* Bit operations */ \
F(NumberOr, 2, 1) \
@@ -146,6 +148,7 @@ namespace internal {
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
+ F(StringCharAt, 2, 1) \
F(StringIndexOf, 3, 1) \
F(StringLastIndexOf, 3, 1) \
F(StringLocaleCompare, 2, 1) \
@@ -202,7 +205,7 @@ namespace internal {
\
/* Eval */ \
F(GlobalReceiver, 1, 1) \
- F(ResolvePossiblyDirectEval, 2, 1) \
+ F(ResolvePossiblyDirectEval, 3, 2) \
\
F(SetProperty, -1 /* 3 or 4 */, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
diff --git a/deps/v8/src/runtime.js b/deps/v8/src/runtime.js
index 1b65fe51d..ce2f197f9 100644
--- a/deps/v8/src/runtime.js
+++ b/deps/v8/src/runtime.js
@@ -114,30 +114,33 @@ function STRICT_EQUALS(x) {
// ECMA-262, section 11.8.5, page 53. The 'ncr' parameter is used as
// the result when either (or both) the operands are NaN.
function COMPARE(x, ncr) {
- // Fast case for numbers and strings.
- if (IS_NUMBER(this) && IS_NUMBER(x)) {
- return %NumberCompare(this, x, ncr);
- }
- if (IS_STRING(this) && IS_STRING(x)) {
- return %StringCompare(this, x);
- }
+ var left;
- // If one of the operands is undefined, it will convert to NaN and
- // thus the result should be as if one of the operands was NaN.
- if (IS_UNDEFINED(this) || IS_UNDEFINED(x)) {
+ // Fast cases for string, numbers and undefined compares.
+ if (IS_STRING(this)) {
+ if (IS_STRING(x)) return %_StringCompare(this, x);
+ if (IS_UNDEFINED(x)) return ncr;
+ left = this;
+ } else if (IS_NUMBER(this)) {
+ if (IS_NUMBER(x)) return %NumberCompare(this, x, ncr);
+ if (IS_UNDEFINED(x)) return ncr;
+ left = this;
+ } else if (IS_UNDEFINED(this)) {
return ncr;
+ } else {
+ if (IS_UNDEFINED(x)) return ncr;
+ left = %ToPrimitive(this, NUMBER_HINT);
}
// Default implementation.
- var a = %ToPrimitive(this, NUMBER_HINT);
- var b = %ToPrimitive(x, NUMBER_HINT);
- if (IS_STRING(a) && IS_STRING(b)) {
- return %StringCompare(a, b);
+ var right = %ToPrimitive(x, NUMBER_HINT);
+ if (IS_STRING(left) && IS_STRING(right)) {
+ return %_StringCompare(left, right);
} else {
- var a_number = %ToNumber(a);
- var b_number = %ToNumber(b);
- if (NUMBER_IS_NAN(a_number) || NUMBER_IS_NAN(b_number)) return ncr;
- return %NumberCompare(a_number, b_number, ncr);
+ var left_number = %ToNumber(left);
+ var right_number = %ToNumber(right);
+ if (NUMBER_IS_NAN(left_number) || NUMBER_IS_NAN(right_number)) return ncr;
+ return %NumberCompare(left_number, right_number, ncr);
}
}
@@ -474,6 +477,17 @@ function TO_STRING() {
}
+// Specialized version of String.charAt. It assumes string as
+// the receiver type and that the index is a number.
+function STRING_CHAR_AT(pos) {
+ var char_code = %_FastCharCodeAt(this, pos);
+ if (!%_IsSmi(char_code)) {
+ return %StringCharAt(this, pos);
+ }
+ return %CharFromCode(char_code);
+}
+
+
/* -------------------------------------
- - - C o n v e r s i o n s - - -
-------------------------------------
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index a47d3730a..701e5e3ed 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -236,7 +236,7 @@ Variable* Scope::DeclareLocal(Handle<String> name, Variable::Mode mode) {
Variable* Scope::DeclareGlobal(Handle<String> name) {
ASSERT(is_global_scope());
- return variables_.Declare(this, name, Variable::DYNAMIC, true,
+ return variables_.Declare(this, name, Variable::DYNAMIC_GLOBAL, true,
Variable::NORMAL);
}
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index fe042524d..db46f3acf 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -918,7 +918,8 @@ void Serializer::Synchronize(const char* tag) {
Serializer::Serializer(SnapshotByteSink* sink)
: sink_(sink),
current_root_index_(0),
- external_reference_encoder_(NULL) {
+ external_reference_encoder_(NULL),
+ partial_(false) {
for (int i = 0; i <= LAST_SPACE; i++) {
fullness_[i] = 0;
}
@@ -946,6 +947,16 @@ void Serializer::Serialize() {
}
+void Serializer::SerializePartial(Object** object) {
+ partial_ = true;
+ external_reference_encoder_ = new ExternalReferenceEncoder();
+ this->VisitPointer(object);
+ delete external_reference_encoder_;
+ external_reference_encoder_ = NULL;
+ SerializationAddressMapper::Zap();
+}
+
+
void Serializer::VisitPointers(Object** start, Object** end) {
for (Object** current = start; current < end; current++) {
if ((*current)->IsSmi()) {
@@ -961,11 +972,30 @@ void Serializer::VisitPointers(Object** start, Object** end) {
}
+int Serializer::RootIndex(HeapObject* heap_object) {
+ for (int i = 0; i < Heap::kRootListLength; i++) {
+ Object* root = Heap::roots_address()[i];
+ if (root == heap_object) return i;
+ }
+ return kInvalidRootIndex;
+}
+
+
void Serializer::SerializeObject(
Object* o,
ReferenceRepresentation reference_representation) {
CHECK(o->IsHeapObject());
HeapObject* heap_object = HeapObject::cast(o);
+ if (partial_) {
+ int root_index = RootIndex(heap_object);
+ if (root_index != kInvalidRootIndex) {
+ sink_->Put(ROOT_SERIALIZATION, "RootSerialization");
+ sink_->PutInt(root_index, "root_index");
+ return;
+ }
+ // All the symbols that the snapshot needs should be in the root table.
+ ASSERT(!heap_object->IsSymbol());
+ }
if (SerializationAddressMapper::IsMapped(heap_object)) {
int space = SpaceOfAlreadySerializedObject(heap_object);
int address = SerializationAddressMapper::MappedTo(heap_object);
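
The partial-serialization fast path above means any object found in the heap's root list is encoded as a tag plus an index instead of a full object body; the deserializer is then assumed to resolve the index against an already-initialized root table, which is also why the ASSERT insists that symbols are always roots. A schematic of the encoding step, with the byte sink reduced to a vector:

    #include <vector>

    const int kRootSerialization = 39;   // tag value from serialize.h below
    const int kInvalidRootIndex = -1;

    // Returns true when root_index identified a root and the short form was
    // emitted; the caller falls back to full serialization otherwise.
    static bool TrySerializeAsRoot(std::vector<int>* sink, int root_index) {
      if (root_index == kInvalidRootIndex) return false;
      sink->push_back(kRootSerialization);
      sink->push_back(root_index);
      return true;
    }

    int main() {
      std::vector<int> sink;
      return TrySerializeAsRoot(&sink, 7) && sink.size() == 2 ? 0 : 1;
    }
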
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index 96bd751da..b32f4e811 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -199,7 +199,8 @@ class SerDes: public ObjectVisitor {
SYNCHRONIZE = 36,
START_NEW_PAGE_SERIALIZATION = 37,
NATIVES_STRING_RESOURCE = 38,
- // Free: 39-47.
+ ROOT_SERIALIZATION = 39,
+ // Free: 40-47.
BACKREF_SERIALIZATION = 48,
// One per space, must be kSpaceMask aligned.
// Free: 57-63.
@@ -293,10 +294,17 @@ class SnapshotByteSink {
class Serializer : public SerDes {
public:
explicit Serializer(SnapshotByteSink* sink);
- // Serialize the current state of the heap. This operation destroys the
- // heap contents.
+ // Serialize the current state of the heap.
void Serialize();
+ // Serialize a single object and the objects reachable from it.
+ void SerializePartial(Object** obj);
void VisitPointers(Object** start, Object** end);
+ // You can call this after serialization to find out how much space was used
+ // in each space.
+ int CurrentAllocationAddress(int space) {
+ if (SpaceIsLarge(space)) space = LO_SPACE;
+ return fullness_[space];
+ }
static void Enable() {
if (!serialization_enabled_) {
@@ -366,13 +374,11 @@ class Serializer : public SerDes {
// once the map has been used for the serialization address.
static int SpaceOfAlreadySerializedObject(HeapObject* object);
int Allocate(int space, int size, bool* new_page_started);
- int CurrentAllocationAddress(int space) {
- if (SpaceIsLarge(space)) space = LO_SPACE;
- return fullness_[space];
- }
int EncodeExternalReference(Address addr) {
return external_reference_encoder_->Encode(addr);
}
+ int RootIndex(HeapObject* heap_object);
+ static const int kInvalidRootIndex = -1;
// Keep track of the fullness of each space in order to generate
// relative addresses for back references. Large objects are
@@ -382,6 +388,7 @@ class Serializer : public SerDes {
SnapshotByteSink* sink_;
int current_root_index_;
ExternalReferenceEncoder* external_reference_encoder_;
+ bool partial_;
static bool serialization_enabled_;
// Did we already make use of the fact that serialization was not enabled?
static bool too_late_to_enable_now_;
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f4d0cb0d8..cd0939800 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -92,6 +92,7 @@ bool HeapObjectIterator::HasNextInNextPage() {
cur_addr_ = cur_page->ObjectAreaStart();
cur_limit_ = (cur_page == end_page_) ? end_addr_ : cur_page->AllocationTop();
+ if (cur_addr_ == end_addr_) return false;
ASSERT(cur_addr_ < cur_limit_);
#ifdef DEBUG
Verify();
@@ -1735,7 +1736,8 @@ void FixedSizeFreeList::Free(Address start) {
Memory::Address_at(start + i) = kZapValue;
}
#endif
- ASSERT(!FLAG_always_compact); // We only use the freelists with mark-sweep.
+ // We only use the freelists with mark-sweep.
+ ASSERT(!MarkCompactCollector::IsCompacting());
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(object_size_);
node->set_next(head_);
@@ -1821,6 +1823,50 @@ void OldSpace::MCCommitRelocationInfo() {
}
+bool NewSpace::ReserveSpace(int bytes) {
+  // We can't reliably unpack a partial snapshot that needs more new space
+  // than the minimum NewSpace size.
+ ASSERT(bytes <= InitialCapacity());
+ Address limit = allocation_info_.limit;
+ Address top = allocation_info_.top;
+ return limit - top >= bytes;
+}
+
+
+bool PagedSpace::ReserveSpace(int bytes) {
+ Address limit = allocation_info_.limit;
+ Address top = allocation_info_.top;
+ if (limit - top >= bytes) return true;
+
+  // There wasn't enough space in the current page. Let's put the rest
+ // of the page on the free list and start a fresh page.
+ PutRestOfCurrentPageOnFreeList(TopPageOf(allocation_info_));
+
+ Page* reserved_page = TopPageOf(allocation_info_);
+ int bytes_left_to_reserve = bytes;
+ while (bytes_left_to_reserve > 0) {
+ if (!reserved_page->next_page()->is_valid()) {
+ if (Heap::OldGenerationAllocationLimitReached()) return false;
+ Expand(reserved_page);
+ }
+ bytes_left_to_reserve -= Page::kPageSize;
+ reserved_page = reserved_page->next_page();
+ if (!reserved_page->is_valid()) return false;
+ }
+ ASSERT(TopPageOf(allocation_info_)->next_page()->is_valid());
+ SetAllocationInfo(&allocation_info_,
+ TopPageOf(allocation_info_)->next_page());
+ return true;
+}
+
+
+// You have to call this last, since the implementation from PagedSpace
+// doesn't know that memory was 'promised' to large object space.
+bool LargeObjectSpace::ReserveSpace(int bytes) {
+ return Heap::OldGenerationSpaceAvailable() >= bytes;
+}
+
+
// Slow case for normal allocation. Try in order: (1) allocate in the next
// page in the space, (2) allocate off the space's free list, (3) expand the
// space, (4) fail.
@@ -1864,19 +1910,37 @@ HeapObject* OldSpace::SlowAllocateRaw(int size_in_bytes) {
}
-// Add the block at the top of the page to the space's free list, set the
-// allocation info to the next page (assumed to be one), and allocate
-// linearly there.
-HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
- int size_in_bytes) {
- ASSERT(current_page->next_page()->is_valid());
- // Add the block at the top of this page to the free list.
+void OldSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
int free_size =
static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
if (free_size > 0) {
int wasted_bytes = free_list_.Free(allocation_info_.top, free_size);
accounting_stats_.WasteBytes(wasted_bytes);
}
+}
+
+
+void FixedSpace::PutRestOfCurrentPageOnFreeList(Page* current_page) {
+ int free_size =
+ static_cast<int>(current_page->ObjectAreaEnd() - allocation_info_.top);
+ // In the fixed space free list all the free list items have the right size.
+ // We use up the rest of the page while preserving this invariant.
+ while (free_size >= object_size_in_bytes_) {
+ free_list_.Free(allocation_info_.top);
+ allocation_info_.top += object_size_in_bytes_;
+ free_size -= object_size_in_bytes_;
+ accounting_stats_.WasteBytes(object_size_in_bytes_);
+ }
+}
+
+
+// Add the block at the top of the page to the space's free list, set the
+// allocation info to the next page (assumed to be one), and allocate
+// linearly there.
+HeapObject* OldSpace::AllocateInNextPage(Page* current_page,
+ int size_in_bytes) {
+ ASSERT(current_page->next_page()->is_valid());
+ PutRestOfCurrentPageOnFreeList(current_page);
SetAllocationInfo(&allocation_info_, current_page->next_page());
return AllocateLinearly(&allocation_info_, size_in_bytes);
}
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index faeafafce..4786fb4dd 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -305,6 +305,14 @@ class Space : public Malloced {
virtual void Print() = 0;
#endif
+ // After calling this we can allocate a certain number of bytes using only
+ // linear allocation (with a LinearAllocationScope and an AlwaysAllocateScope)
+ // without using freelists or causing a GC. This is used by partial
+  // snapshots. It returns true if space was reserved or false if a GC is
+  // needed. For paged spaces the space requested must include the space wasted
+  // at the end of each page when allocating linearly.
+ virtual bool ReserveSpace(int bytes) = 0;
+
private:
AllocationSpace id_;
Executability executable_;
@@ -887,6 +895,10 @@ class PagedSpace : public Space {
// collection.
inline Object* MCAllocateRaw(int size_in_bytes);
+ virtual bool ReserveSpace(int bytes);
+
+ // Used by ReserveSpace.
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page) = 0;
// ---------------------------------------------------------------------------
// Mark-compact collection support functions
@@ -993,6 +1005,9 @@ class PagedSpace : public Space {
HeapObject* SlowMCAllocateRaw(int size_in_bytes);
#ifdef DEBUG
+ // Returns the number of total pages in this space.
+ int CountTotalPages();
+
void DoPrintRSet(const char* space_name);
#endif
private:
@@ -1002,11 +1017,6 @@ class PagedSpace : public Space {
// Returns a pointer to the page of the relocation pointer.
Page* MCRelocationTopPage() { return TopPageOf(mc_forwarding_info_); }
-#ifdef DEBUG
- // Returns the number of total pages in this space.
- int CountTotalPages();
-#endif
-
friend class PageIterator;
};
@@ -1117,13 +1127,18 @@ class SemiSpace : public Space {
return static_cast<int>(addr - low());
}
- // If we don't have this here then SemiSpace will be abstract. However
- // it should never be called.
+ // If we don't have these here then SemiSpace will be abstract. However
+ // they should never be called.
virtual int Size() {
UNREACHABLE();
return 0;
}
+ virtual bool ReserveSpace(int bytes) {
+ UNREACHABLE();
+ return false;
+ }
+
bool is_committed() { return committed_; }
bool Commit();
bool Uncommit();
@@ -1347,6 +1362,8 @@ class NewSpace : public Space {
bool ToSpaceContains(Address a) { return to_space_.Contains(a); }
bool FromSpaceContains(Address a) { return from_space_.Contains(a); }
+ virtual bool ReserveSpace(int bytes);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
virtual void Protect();
@@ -1633,6 +1650,8 @@ class OldSpace : public PagedSpace {
// collection.
virtual void MCCommitRelocationInfo();
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
#ifdef DEBUG
// Reports statistics for the space
void ReportStatistics();
@@ -1694,6 +1713,8 @@ class FixedSpace : public PagedSpace {
// collection.
virtual void MCCommitRelocationInfo();
+ virtual void PutRestOfCurrentPageOnFreeList(Page* current_page);
+
#ifdef DEBUG
// Reports statistic info of the space
void ReportStatistics();
@@ -1710,6 +1731,10 @@ class FixedSpace : public PagedSpace {
// the page after current_page (there is assumed to be one).
HeapObject* AllocateInNextPage(Page* current_page, int size_in_bytes);
+ void ResetFreeList() {
+ free_list_.Reset();
+ }
+
private:
// The size of objects in this space.
int object_size_in_bytes_;
@@ -1740,12 +1765,81 @@ class MapSpace : public FixedSpace {
// Constants.
static const int kMaxMapPageIndex = (1 << MapWord::kMapPageIndexBits) - 1;
+ // Are map pointers encodable into map word?
+ bool MapPointersEncodable() {
+ if (!FLAG_use_big_map_space) {
+ ASSERT(CountTotalPages() <= kMaxMapPageIndex);
+ return true;
+ }
+ int n_of_pages = Capacity() / Page::kObjectAreaSize;
+ ASSERT(n_of_pages == CountTotalPages());
+ return n_of_pages <= kMaxMapPageIndex;
+ }
+
+ // Should be called after forced sweep to find out if map space needs
+ // compaction.
+ bool NeedsCompaction(int live_maps) {
+ return !MapPointersEncodable() && live_maps <= kCompactionThreshold;
+ }
+
+ Address TopAfterCompaction(int live_maps) {
+ ASSERT(NeedsCompaction(live_maps));
+
+ int pages_left = live_maps / kMapsPerPage;
+ PageIterator it(this, PageIterator::ALL_PAGES);
+ while (pages_left-- > 0) {
+ ASSERT(it.has_next());
+ it.next()->ClearRSet();
+ }
+ ASSERT(it.has_next());
+ Page* top_page = it.next();
+ top_page->ClearRSet();
+ ASSERT(top_page->is_valid());
+
+ int offset = live_maps % kMapsPerPage * Map::kSize;
+ Address top = top_page->ObjectAreaStart() + offset;
+ ASSERT(top < top_page->ObjectAreaEnd());
+ ASSERT(Contains(top));
+
+ return top;
+ }
+
+ void FinishCompaction(Address new_top, int live_maps) {
+ Page* top_page = Page::FromAddress(new_top);
+ ASSERT(top_page->is_valid());
+
+ SetAllocationInfo(&allocation_info_, top_page);
+ allocation_info_.top = new_top;
+
+ int new_size = live_maps * Map::kSize;
+ accounting_stats_.DeallocateBytes(accounting_stats_.Size());
+ accounting_stats_.AllocateBytes(new_size);
+
+#ifdef DEBUG
+ if (FLAG_enable_slow_asserts) {
+ int actual_size = 0;
+ for (Page* p = first_page_; p != top_page; p = p->next_page())
+ actual_size += kMapsPerPage * Map::kSize;
+ actual_size += (new_top - top_page->ObjectAreaStart());
+ ASSERT(accounting_stats_.Size() == actual_size);
+ }
+#endif
+
+ Shrink();
+ ResetFreeList();
+ }
+
protected:
#ifdef DEBUG
virtual void VerifyObject(HeapObject* obj);
#endif
private:
+ static const int kMapsPerPage = Page::kObjectAreaSize / Map::kSize;
+
+ // Do map space compaction if there is a page gap.
+ static const int kCompactionThreshold = kMapsPerPage * (kMaxMapPageIndex - 1);
+
// An array of page start address in a map space.
Address page_addresses_[kMaxMapPageIndex + 1];
@@ -1890,6 +1984,11 @@ class LargeObjectSpace : public Space {
// Checks whether the space is empty.
bool IsEmpty() { return first_chunk_ == NULL; }
+ // See the comments for ReserveSpace in the Space class. This has to be
+ // called after ReserveSpace has been called on the paged spaces, since they
+ // may use some memory, leaving less for large objects.
+ virtual bool ReserveSpace(int bytes);
+
#ifdef ENABLE_HEAP_PROTECTION
// Protect/unprotect the space by marking it read-only/writable.
void Protect();
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 4f9957a63..ed938ecfa 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -87,12 +87,14 @@ function StringCharCodeAt(pos) {
// ECMA-262, section 15.5.4.6
function StringConcat() {
- var len = %_ArgumentsLength();
- var parts = new $Array(len + 1);
- parts[0] = ToString(this);
- for (var i = 0; i < len; i++)
- parts[i + 1] = ToString(%_Arguments(i));
- return parts.join('');
+ var len = %_ArgumentsLength() + 1;
+ var parts = new $Array(len);
+ parts[0] = IS_STRING(this) ? this : ToString(this);
+ for (var i = 1; i < len; i++) {
+ var part = %_Arguments(i - 1);
+ parts[i] = IS_STRING(part) ? part : ToString(part);
+ }
+ return %StringBuilderConcat(parts, len, "");
}
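
The rewrite above avoids calling ToString on arguments that are already
strings and hands the collected parts straight to %StringBuilderConcat. A
minimal sketch of the observable behavior (plain JavaScript, not the
internal %-runtime calls):

    "foo".concat("bar", 1, true);   // "foobar1true"
    // String arguments pass through untouched; everything else is
    // converted with ToString first.
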
// Match ES3 and Safari
@@ -180,7 +182,7 @@ function SubString(string, start, end) {
}
return %CharFromCode(char_code);
}
- return %SubString(string, start, end);
+ return %_SubString(string, start, end);
}
@@ -194,7 +196,7 @@ var reusableMatchInfo = [2, "", "", -1, -1];
// ECMA-262, section 15.5.4.11
function StringReplace(search, replace) {
- var subject = ToString(this);
+ var subject = IS_STRING(this) ? this : ToString(this);
// Delegate to one of the regular expression variants if necessary.
if (IS_REGEXP(search)) {
@@ -207,7 +209,7 @@ function StringReplace(search, replace) {
}
// Convert the search argument to a string and search for it.
- search = ToString(search);
+ search = IS_STRING(search) ? search : ToString(search);
var start = %StringIndexOf(subject, search, 0);
if (start < 0) return subject;
var end = start + search.length;
@@ -222,7 +224,8 @@ function StringReplace(search, replace) {
} else {
reusableMatchInfo[CAPTURE0] = start;
reusableMatchInfo[CAPTURE1] = end;
- ExpandReplacement(ToString(replace), subject, reusableMatchInfo, builder);
+ if (!IS_STRING(replace)) replace = ToString(replace);
+ ExpandReplacement(replace, subject, reusableMatchInfo, builder);
}
// suffix
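
StringReplace delegates to the RegExp machinery only when the search
argument is an actual regular expression; a plain-string search finds and
replaces a single occurrence. Illustrative cases:

    "a-b-c".replace("-", "_");     // "a_b-c": string search, first match only
    "a-b-c".replace(/-/g, "_");    // "a_b_c": the RegExp variant handles /g
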
@@ -505,7 +508,7 @@ function StringSlice(start, end) {
// ECMA-262 section 15.5.4.14
function StringSplit(separator, limit) {
var subject = ToString(this);
- limit = (limit === void 0) ? 0xffffffff : ToUint32(limit);
+ limit = (IS_UNDEFINED(limit)) ? 0xffffffff : TO_UINT32(limit);
if (limit === 0) return [];
// ECMA-262 says that if separator is undefined, the result should
@@ -604,22 +607,30 @@ function splitMatch(separator, subject, current_index, start_index) {
// ECMA-262 section 15.5.4.15
function StringSubstring(start, end) {
- var s = ToString(this);
+ var s = this;
+ if (!IS_STRING(s)) s = ToString(s);
var s_len = s.length;
+
var start_i = TO_INTEGER(start);
+ if (start_i < 0) {
+ start_i = 0;
+ } else if (start_i > s_len) {
+ start_i = s_len;
+ }
+
var end_i = s_len;
- if (!IS_UNDEFINED(end))
+ if (!IS_UNDEFINED(end)) {
end_i = TO_INTEGER(end);
-
- if (start_i < 0) start_i = 0;
- if (start_i > s_len) start_i = s_len;
- if (end_i < 0) end_i = 0;
- if (end_i > s_len) end_i = s_len;
-
- if (start_i > end_i) {
- var tmp = end_i;
- end_i = start_i;
- start_i = tmp;
+ if (end_i > s_len) {
+ end_i = s_len;
+ } else {
+ if (end_i < 0) end_i = 0;
+ if (start_i > end_i) {
+ var tmp = end_i;
+ end_i = start_i;
+ start_i = tmp;
+ }
+ }
}
return SubString(s, start_i, end_i);
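
The restructured clamping preserves the ECMA-262 behavior: both indices are
clamped to [0, length], an undefined end defaults to the length, and the
indices are swapped when start exceeds end. A few cases for reference:

    "abcdef".substring(4, 2);      // "cd"   (indices swapped)
    "abcdef".substring(-3, 2);     // "ab"   (negative start clamped to 0)
    "abcdef".substring(2);         // "cdef" (undefined end becomes length)
    "abcdef".substring(2, 100);    // "cdef" (end clamped to length)
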
@@ -790,21 +801,14 @@ function StringSup() {
}
-// StringBuilder support.
-
-function StringBuilder() {
- this.elements = new $Array();
-}
-
-
+// ReplaceResultBuilder support.
function ReplaceResultBuilder(str) {
this.elements = new $Array();
this.special_string = str;
}
-ReplaceResultBuilder.prototype.add =
-StringBuilder.prototype.add = function(str) {
+ReplaceResultBuilder.prototype.add = function(str) {
if (!IS_STRING(str)) str = ToString(str);
if (str.length > 0) {
var elements = this.elements;
@@ -828,13 +832,9 @@ ReplaceResultBuilder.prototype.addSpecialSlice = function(start, end) {
}
-StringBuilder.prototype.generate = function() {
- return %StringBuilderConcat(this.elements, "");
-}
-
-
ReplaceResultBuilder.prototype.generate = function() {
- return %StringBuilderConcat(this.elements, this.special_string);
+ var elements = this.elements;
+ return %StringBuilderConcat(elements, elements.length, this.special_string);
}
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 08ee16ff2..374385b67 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -40,6 +40,7 @@ namespace internal {
// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
// figure 3-3, page 48, where the function is called clp2.
uint32_t RoundUpToPowerOf2(uint32_t x) {
+ ASSERT(x <= 0x80000000u);
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
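
For context, the clp2 routine smears the highest set bit of x - 1 into every
lower bit position and then adds one; the hunk shows only the first smearing
steps. A hypothetical JavaScript port of the whole sequence, including the
bound the new ASSERT enforces:

    function roundUpToPowerOf2(x) {
      // Assumed precondition, mirroring the ASSERT: x <= 0x80000000.
      x = x - 1;
      x |= x >>> 1;
      x |= x >>> 2;
      x |= x >>> 4;
      x |= x >>> 8;
      x |= x >>> 16;
      return (x + 1) >>> 0;
    }
    roundUpToPowerOf2(33);  // 64
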
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 158824d0e..fb1e9265e 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -153,7 +153,13 @@ namespace internal {
SC(generic_binary_stub_calls, V8.GenericBinaryStubCalls) \
SC(generic_binary_stub_calls_regs, V8.GenericBinaryStubCallsRegs) \
SC(string_add_runtime, V8.StringAddRuntime) \
- SC(string_add_native, V8.StringAddNative)
+ SC(string_add_native, V8.StringAddNative) \
+ SC(sub_string_runtime, V8.SubStringRuntime) \
+ SC(sub_string_native, V8.SubStringNative) \
+ SC(string_compare_native, V8.StringCompareNative) \
+ SC(string_compare_runtime, V8.StringCompareRuntime) \
+ SC(regexp_entry_runtime, V8.RegExpEntryRuntime) \
+ SC(regexp_entry_native, V8.RegExpEntryNative)
// This file contains all the v8 counters that are in use.
class Counters : AllStatic {
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index a66409558..700b9e47e 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -305,6 +305,22 @@ function IsInconsistentDescriptor(desc) {
return IsAccessorDescriptor(desc) && IsDataDescriptor(desc);
}
+// ES5 8.10.4
+function FromPropertyDescriptor(desc) {
+ if (IS_UNDEFINED(desc)) return desc;
+ var obj = new $Object();
+ if (IsDataDescriptor(desc)) {
+ obj.value = desc.getValue();
+ obj.writable = desc.isWritable();
+ }
+ if (IsAccessorDescriptor(desc)) {
+ obj.get = desc.getGet();
+ obj.set = desc.getSet();
+ }
+ obj.enumerable = desc.isEnumerable();
+ obj.configurable = desc.isConfigurable();
+ return obj;
+}
// ES5 8.10.5.
function ToPropertyDescriptor(obj) {
@@ -433,6 +449,33 @@ PropertyDescriptor.prototype.getSet = function() {
}
+// ES5 section 8.12.1.
+function GetOwnProperty(obj, p) {
+ var desc = new PropertyDescriptor();
+
+ // %GetOwnProperty returns an array:
+ // if obj is a data property: [false, value, Writable, Enumerable, Configurable]
+ // if obj is an accessor: [true, Get, Set, Enumerable, Configurable]
+ var props = %GetOwnProperty(ToObject(obj), ToString(p));
+
+ if (IS_UNDEFINED(props))
+ return void 0;
+
+ // This is an accessor
+ if (props[0]) {
+ desc.setGet(props[1]);
+ desc.setSet(props[2]);
+ } else {
+ desc.setValue(props[1]);
+ desc.setWritable(props[2]);
+ }
+ desc.setEnumerable(props[3]);
+ desc.setConfigurable(props[4]);
+
+ return desc;
+}
+
+
// ES5 8.12.9. This version cannot cope with the property p already
// being present on obj.
function DefineOwnProperty(obj, p, desc, should_throw) {
@@ -448,6 +491,25 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
+// ES5 section 15.2.3.2.
+function ObjectGetPrototypeOf(obj) {
+ if (!IS_OBJECT(obj) && !IS_FUNCTION(obj)) {
+ throw MakeTypeError("object_get_prototype_non_object", [obj]);
+ }
+ return obj.__proto__;
+}
+
+
+// ES5 section 15.2.3.3
+function ObjectGetOwnPropertyDescriptor(obj, p) {
+ if (!IS_OBJECT(obj) && !IS_FUNCTION(obj)) {
+ throw MakeTypeError("object_get_prototype_non_object", [obj]);
+ }
+ var desc = GetOwnProperty(obj, p);
+ return FromPropertyDescriptor(desc);
+}
+
+
// ES5 section 15.2.3.5.
function ObjectCreate(proto, properties) {
if (!IS_OBJECT(proto) && !IS_NULL(proto)) {
@@ -512,7 +574,9 @@ function SetupObject() {
));
InstallFunctions($Object, DONT_ENUM, $Array(
"keys", ObjectKeys,
- "create", ObjectCreate
+ "create", ObjectCreate,
+ "getPrototypeOf", ObjectGetPrototypeOf,
+ "getOwnPropertyDescriptor", ObjectGetOwnPropertyDescriptor
));
}
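
With these two entries installed, the new ES5 reflection functions become
callable from script. A usage sketch:

    var o = { answer: 42 };
    Object.getPrototypeOf(o) === Object.prototype;   // true
    Object.getOwnPropertyDescriptor(o, "answer");
    // => { value: 42, writable: true, enumerable: true, configurable: true }
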
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index bcfd7fa9c..00052d1cb 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -34,8 +34,8 @@
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 2
#define MINOR_VERSION 0
-#define BUILD_NUMBER 5
-#define PATCH_LEVEL 4
+#define BUILD_NUMBER 6
+#define PATCH_LEVEL 1
#define CANDIDATE_VERSION false
// Define SONAME to have the SCons build put a specific SONAME into the
diff --git a/deps/v8/src/x64/codegen-x64-inl.h b/deps/v8/src/x64/codegen-x64-inl.h
index 6869fc935..60e9ab03a 100644
--- a/deps/v8/src/x64/codegen-x64-inl.h
+++ b/deps/v8/src/x64/codegen-x64-inl.h
@@ -39,16 +39,6 @@ namespace internal {
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
-
-void CodeGenerator::GenerateMathSin(ZoneList<Expression*>* args) {
- GenerateFastMathOp(SIN, args);
-}
-
-
-void CodeGenerator::GenerateMathCos(ZoneList<Expression*>* args) {
- GenerateFastMathOp(COS, args);
-}
-
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 85ccb676c..e912bbcff 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -326,12 +326,19 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
function_return_is_shadowed_ = false;
// Allocate the local context if needed.
- if (scope_->num_heap_slots() > 0) {
+ int heap_slots = scope_->num_heap_slots();
+ if (heap_slots > 0) {
Comment cmnt(masm_, "[ allocate local context");
// Allocate local context.
// Get outer context and create a new context based on it.
frame_->PushFunction();
- Result context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ Result context;
+ if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+ FastNewContextStub stub(heap_slots);
+ context = frame_->CallStub(&stub, 1);
+ } else {
+ context = frame_->CallRuntime(Runtime::kNewContext, 1);
+ }
// Update context local.
frame_->SaveContextRegister();
@@ -393,6 +400,12 @@ void CodeGenerator::GenCode(FunctionLiteral* function) {
StoreArgumentsObject(true);
}
+ // Initialize ThisFunction reference if present.
+ if (scope_->is_function_scope() && scope_->function() != NULL) {
+ frame_->Push(Factory::the_hole_value());
+ StoreToSlot(scope_->function()->slot(), NOT_CONST_INIT);
+ }
+
// Generate code to 'execute' declarations and initialize functions
// (source elements). In case of an illegal redeclaration we need to
// handle that instead of processing the declarations.
@@ -1865,13 +1878,9 @@ void CodeGenerator::VisitTryCatchStatement(TryCatchStatement* node) {
frame_->EmitPush(rax);
// Store the caught exception in the catch variable.
- { Reference ref(this, node->catch_var());
- ASSERT(ref.is_slot());
- // Load the exception to the top of the stack. Here we make use of the
- // convenient property that it doesn't matter whether a value is
- // immediately on top of or underneath a zero-sized reference.
- ref.SetValue(NOT_CONST_INIT);
- }
+ Variable* catch_var = node->catch_var()->var();
+ ASSERT(catch_var != NULL && catch_var->slot() != NULL);
+ StoreToSlot(catch_var->slot(), NOT_CONST_INIT);
// Remove the exception from the stack.
frame_->Drop();
@@ -2196,19 +2205,28 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
void CodeGenerator::InstantiateBoilerplate(Handle<JSFunction> boilerplate) {
- // Call the runtime to instantiate the function boilerplate object.
+ ASSERT(boilerplate->IsBoilerplate());
+
// The inevitable call will sync frame elements to memory anyway, so
// we do it eagerly to allow us to push the arguments directly into
// place.
- ASSERT(boilerplate->IsBoilerplate());
frame_->SyncRange(0, frame_->element_count() - 1);
- // Create a new closure.
- frame_->EmitPush(rsi);
- __ movq(kScratchRegister, boilerplate, RelocInfo::EMBEDDED_OBJECT);
- frame_->EmitPush(kScratchRegister);
- Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
- frame_->Push(&result);
+ // Use the fast case closure allocation code that allocates in new
+ // space for nested functions that don't need literals cloning.
+ if (scope()->is_function_scope() && boilerplate->NumberOfLiterals() == 0) {
+ FastNewClosureStub stub;
+ frame_->Push(boilerplate);
+ Result answer = frame_->CallStub(&stub, 1);
+ frame_->Push(&answer);
+ } else {
+ // Call the runtime to instantiate the function boilerplate
+ // object.
+ frame_->EmitPush(rsi);
+ frame_->EmitPush(boilerplate);
+ Result result = frame_->CallRuntime(Runtime::kNewClosure, 2);
+ frame_->Push(&result);
+ }
}
@@ -2362,46 +2380,10 @@ void CodeGenerator::VisitRegExpLiteral(RegExpLiteral* node) {
}
-// Materialize the object literal 'node' in the literals array
-// 'literals' of the function. Leave the object boilerplate in
-// 'boilerplate'.
-class DeferredObjectLiteral: public DeferredCode {
- public:
- DeferredObjectLiteral(Register boilerplate,
- Register literals,
- ObjectLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredObjectLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ObjectLiteral* node_;
-};
-
-
-void DeferredObjectLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // Constant properties (2).
- __ Push(node_->constant_properties());
- __ CallRuntime(Runtime::kCreateObjectLiteralBoilerplate, 3);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
Comment cmnt(masm_, "[ ObjectLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -2411,32 +2393,18 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
// Load the literals array of the function.
__ movq(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredObjectLiteral* deferred =
- new DeferredObjectLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the boilerplate object.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant properties.
+ frame_->Push(node->constant_properties());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateObjectLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
for (int i = 0; i < node->properties()->length(); i++) {
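
The depth test decides between the two runtime entries: a literal that nests
other literals (depth > 1) needs the deep-copying path, while a flat one can
be cloned shallowly. For example:

    var flat   = { a: 1, b: 2 };    // depth 1: kCreateObjectLiteralShallow
    var nested = { a: { b: 2 } };   // depth 2: kCreateObjectLiteral
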
@@ -2496,45 +2464,10 @@ void CodeGenerator::VisitObjectLiteral(ObjectLiteral* node) {
}
-// Materialize the array literal 'node' in the literals array 'literals'
-// of the function. Leave the array boilerplate in 'boilerplate'.
-class DeferredArrayLiteral: public DeferredCode {
- public:
- DeferredArrayLiteral(Register boilerplate,
- Register literals,
- ArrayLiteral* node)
- : boilerplate_(boilerplate), literals_(literals), node_(node) {
- set_comment("[ DeferredArrayLiteral");
- }
-
- void Generate();
-
- private:
- Register boilerplate_;
- Register literals_;
- ArrayLiteral* node_;
-};
-
-
-void DeferredArrayLiteral::Generate() {
- // Since the entry is undefined we call the runtime system to
- // compute the literal.
- // Literal array (0).
- __ push(literals_);
- // Literal index (1).
- __ Push(Smi::FromInt(node_->literal_index()));
- // Constant properties (2).
- __ Push(node_->literals());
- __ CallRuntime(Runtime::kCreateArrayLiteralBoilerplate, 3);
- if (!boilerplate_.is(rax)) __ movq(boilerplate_, rax);
-}
-
-
void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
Comment cmnt(masm_, "[ ArrayLiteral");
- // Retrieve the literals array and check the allocated entry. Begin
- // with a writable copy of the function of this activation in a
+ // Load a writable copy of the function of this activation in a
// register.
frame_->PushFunction();
Result literals = frame_->Pop();
@@ -2544,32 +2477,18 @@ void CodeGenerator::VisitArrayLiteral(ArrayLiteral* node) {
// Load the literals array of the function.
__ movq(literals.reg(),
FieldOperand(literals.reg(), JSFunction::kLiteralsOffset));
-
- // Load the literal at the ast saved index.
- Result boilerplate = allocator_->Allocate();
- ASSERT(boilerplate.is_valid());
- int literal_offset =
- FixedArray::kHeaderSize + node->literal_index() * kPointerSize;
- __ movq(boilerplate.reg(), FieldOperand(literals.reg(), literal_offset));
-
- // Check whether we need to materialize the object literal boilerplate.
- // If so, jump to the deferred code passing the literals array.
- DeferredArrayLiteral* deferred =
- new DeferredArrayLiteral(boilerplate.reg(), literals.reg(), node);
- __ CompareRoot(boilerplate.reg(), Heap::kUndefinedValueRootIndex);
- deferred->Branch(equal);
- deferred->BindExit();
- literals.Unuse();
-
- // Push the resulting array literal boilerplate on the stack.
- frame_->Push(&boilerplate);
- // Clone the boilerplate object.
- Runtime::FunctionId clone_function_id = Runtime::kCloneLiteralBoilerplate;
- if (node->depth() == 1) {
- clone_function_id = Runtime::kCloneShallowLiteralBoilerplate;
+ // Literal array.
+ frame_->Push(&literals);
+ // Literal index.
+ frame_->Push(Smi::FromInt(node->literal_index()));
+ // Constant elements.
+ frame_->Push(node->constant_elements());
+ Result clone;
+ if (node->depth() > 1) {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteral, 3);
+ } else {
+ clone = frame_->CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
}
- Result clone = frame_->CallRuntime(clone_function_id, 1);
- // Push the newly cloned literal object as the result.
frame_->Push(&clone);
// Generate code to set the elements in the array that are not
@@ -2770,23 +2689,19 @@ void CodeGenerator::VisitCall(Call* node) {
frame_->Push(Factory::undefined_value());
}
+ // Push the receiver.
+ frame_->PushParameterAt(-1);
+
// Resolve the call.
Result result =
- frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 2);
+ frame_->CallRuntime(Runtime::kResolvePossiblyDirectEval, 3);
- // Touch up the stack with the right values for the function and the
- // receiver. Use a scratch register to avoid destroying the result.
- Result scratch = allocator_->Allocate();
- ASSERT(scratch.is_valid());
- __ movq(scratch.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(0)));
- frame_->SetElementAt(arg_count + 1, &scratch);
-
- // We can reuse the result register now.
- frame_->Spill(result.reg());
- __ movq(result.reg(),
- FieldOperand(result.reg(), FixedArray::OffsetOfElementAt(1)));
- frame_->SetElementAt(arg_count, &result);
+ // The runtime call returns a pair of values in rax (function) and
+ // rdx (receiver). Touch up the stack with the right values.
+ Result receiver = allocator_->Allocate(rdx);
+ frame_->SetElementAt(arg_count + 1, &result);
+ frame_->SetElementAt(arg_count, &receiver);
+ receiver.Unuse();
// Call the function.
CodeForSourcePosition(node->position());
@@ -3109,7 +3024,7 @@ void CodeGenerator::VisitUnaryOperation(UnaryOperation* node) {
bool overwrite =
(node->expression()->AsBinaryOperation() != NULL &&
node->expression()->AsBinaryOperation()->ResultOverwriteAllowed());
- UnarySubStub stub(overwrite);
+ GenericUnaryOpStub stub(Token::SUB, overwrite);
// TODO(1222589): remove dependency of TOS being cached inside stub
Result operand = frame_->Pop();
Result answer = frame_->CallStub(&stub, &operand);
@@ -3979,80 +3894,50 @@ void CodeGenerator::GenerateRandomPositiveSmi(ZoneList<Expression*>* args) {
}
-void CodeGenerator::GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args) {
- JumpTarget done;
- JumpTarget call_runtime;
- ASSERT(args->length() == 1);
+void CodeGenerator::GenerateRegExpExec(ZoneList<Expression*>* args) {
+ ASSERT_EQ(args->length(), 4);
- // Load number and duplicate it.
+ // Load the arguments on the stack and call the runtime system.
Load(args->at(0));
- frame_->Dup();
+ Load(args->at(1));
+ Load(args->at(2));
+ Load(args->at(3));
+ Result result = frame_->CallRuntime(Runtime::kRegExpExec, 4);
+ frame_->Push(&result);
+}
- // Get the number into an unaliased register and load it onto the
- // floating point stack still leaving one copy on the frame.
- Result number = frame_->Pop();
- number.ToRegister();
- frame_->Spill(number.reg());
- FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
- number.Unuse();
- // Perform the operation on the number.
- switch (op) {
- case SIN:
- __ fsin();
- break;
- case COS:
- __ fcos();
- break;
- }
+void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+ ASSERT_EQ(2, args->length());
- // Go slow case if argument to operation is out of range.
- Result eax_reg = allocator()->Allocate(rax);
- ASSERT(eax_reg.is_valid());
- __ fnstsw_ax();
- __ testl(rax, Immediate(0x0400)); // Bit 10 is condition flag C2.
- eax_reg.Unuse();
- call_runtime.Branch(not_zero);
-
- // Allocate heap number for result if possible.
- Result scratch = allocator()->Allocate();
- Result heap_number = allocator()->Allocate();
- __ AllocateHeapNumber(heap_number.reg(),
- scratch.reg(),
- call_runtime.entry_label());
- scratch.Unuse();
+ Load(args->at(0));
+ Load(args->at(1));
- // Store the result in the allocated heap number.
- __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
- // Replace the extra copy of the argument with the result.
- frame_->SetElementAt(0, &heap_number);
- done.Jump();
+ StringAddStub stub(NO_STRING_ADD_FLAGS);
+ Result answer = frame_->CallStub(&stub, 2);
+ frame_->Push(&answer);
+}
- call_runtime.Bind();
- // Free ST(0) which was not popped before calling into the runtime.
- __ ffree(0);
- Result answer;
- switch (op) {
- case SIN:
- answer = frame_->CallRuntime(Runtime::kMath_sin, 1);
- break;
- case COS:
- answer = frame_->CallRuntime(Runtime::kMath_cos, 1);
- break;
- }
+
+void CodeGenerator::GenerateSubString(ZoneList<Expression*>* args) {
+ ASSERT_EQ(3, args->length());
+
+ Load(args->at(0));
+ Load(args->at(1));
+ Load(args->at(2));
+
+ Result answer = frame_->CallRuntime(Runtime::kSubString, 3);
frame_->Push(&answer);
- done.Bind();
}
-void CodeGenerator::GenerateStringAdd(ZoneList<Expression*>* args) {
+void CodeGenerator::GenerateStringCompare(ZoneList<Expression*>* args) {
ASSERT_EQ(2, args->length());
Load(args->at(0));
Load(args->at(1));
- StringAddStub stub(NO_STRING_ADD_FLAGS);
- Result answer = frame_->CallStub(&stub, 2);
+ Result answer = frame_->CallRuntime(Runtime::kStringCompare, 2);
frame_->Push(&answer);
}
@@ -4380,15 +4265,7 @@ void CodeGenerator::LoadReference(Reference* ref) {
// The expression is either a property or a variable proxy that rewrites
// to a property.
Load(property->obj());
- // We use a named reference if the key is a literal symbol, unless it is
- // a string that can be legally parsed as an integer. This is because
- // otherwise we will not get into the slow case code that handles [] on
- // String objects.
- Literal* literal = property->key()->AsLiteral();
- uint32_t dummy;
- if (literal != NULL &&
- literal->handle()->IsSymbol() &&
- !String::cast(*(literal->handle()))->AsArrayIndex(&dummy)) {
+ if (property->key()->IsPropertyName()) {
ref->set_type(Reference::NAMED);
} else {
Load(property->key());
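
IsPropertyName() packages the heuristic the deleted comment spells out: a
literal symbol key produces a named reference unless it parses as an array
index, in which case the keyed path (which also handles [] on String
objects) must be used. Illustrative cases:

    o.name;      // named reference
    o["name"];   // named: literal symbol, not an array index
    o[0];        // keyed reference
    s["0"];      // keyed: "0" parses as an array index
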
@@ -4864,36 +4741,34 @@ Result CodeGenerator::StoreArgumentsObject(bool initial) {
frame_->Push(&result);
}
- { Reference shadow_ref(this, scope_->arguments_shadow());
- Reference arguments_ref(this, scope_->arguments());
- ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
- // Here we rely on the convenient property that references to slot
- // take up zero space in the frame (ie, it doesn't matter that the
- // stored value is actually below the reference on the frame).
- JumpTarget done;
- bool skip_arguments = false;
- if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
- // We have to skip storing into the arguments slot if it has
- // already been written to. This can happen if the a function
- // has a local variable named 'arguments'.
- LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
- Result arguments = frame_->Pop();
- if (arguments.is_constant()) {
- // We have to skip updating the arguments object if it has
- // been assigned a proper value.
- skip_arguments = !arguments.handle()->IsTheHole();
- } else {
- __ CompareRoot(arguments.reg(), Heap::kTheHoleValueRootIndex);
- arguments.Unuse();
- done.Branch(not_equal);
- }
- }
- if (!skip_arguments) {
- arguments_ref.SetValue(NOT_CONST_INIT);
- if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+
+ Variable* arguments = scope_->arguments()->var();
+ Variable* shadow = scope_->arguments_shadow()->var();
+ ASSERT(arguments != NULL && arguments->slot() != NULL);
+ ASSERT(shadow != NULL && shadow->slot() != NULL);
+ JumpTarget done;
+ bool skip_arguments = false;
+ if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
+ // We have to skip storing into the arguments slot if it has
+ // already been written to. This can happen if a function
+ // has a local variable named 'arguments'.
+ LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
+ Result probe = frame_->Pop();
+ if (probe.is_constant()) {
+ // We have to skip updating the arguments object if it has been
+ // assigned a proper value.
+ skip_arguments = !probe.handle()->IsTheHole();
+ } else {
+ __ CompareRoot(probe.reg(), Heap::kTheHoleValueRootIndex);
+ probe.Unuse();
+ done.Branch(not_equal);
}
- shadow_ref.SetValue(NOT_CONST_INIT);
}
+ if (!skip_arguments) {
+ StoreToSlot(arguments->slot(), NOT_CONST_INIT);
+ if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
+ }
+ StoreToSlot(shadow->slot(), NOT_CONST_INIT);
return frame_->Pop();
}
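
The skip logic exists because a local variable named 'arguments' must shadow
the lazily allocated arguments object, as in:

    function f() {
      var arguments = "user value";
      return arguments;   // "user value": the stored value is not clobbered
    }
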
@@ -6199,6 +6074,91 @@ void Reference::SetValue(InitState init_state) {
}
+void FastNewClosureStub::Generate(MacroAssembler* masm) {
+ // Clone the boilerplate in new space. Set the context to the
+ // current context in rsi.
+ Label gc;
+ __ AllocateInNewSpace(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the boilerplate function from the stack.
+ __ movq(rdx, Operand(rsp, 1 * kPointerSize));
+
+ // Compute the function map in the current global context and set that
+ // as the map of the allocated object.
+ __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalContextOffset));
+ __ movq(rcx, Operand(rcx, Context::SlotOffset(Context::FUNCTION_MAP_INDEX)));
+ __ movq(FieldOperand(rax, JSObject::kMapOffset), rcx);
+
+ // Clone the rest of the boilerplate fields. We don't have to update
+ // the write barrier because the allocated object is in new space.
+ for (int offset = kPointerSize;
+ offset < JSFunction::kSize;
+ offset += kPointerSize) {
+ if (offset == JSFunction::kContextOffset) {
+ __ movq(FieldOperand(rax, offset), rsi);
+ } else {
+ __ movq(rbx, FieldOperand(rdx, offset));
+ __ movq(FieldOperand(rax, offset), rbx);
+ }
+ }
+
+ // Return and remove the on-stack parameter.
+ __ ret(1 * kPointerSize);
+
+ // Create a new closure through the slower runtime call.
+ __ bind(&gc);
+ __ pop(rcx); // Temporarily remove return address.
+ __ pop(rdx);
+ __ push(rsi);
+ __ push(rdx);
+ __ push(rcx); // Restore return address.
+ __ TailCallRuntime(ExternalReference(Runtime::kNewClosure), 2, 1);
+}
+
+
+void FastNewContextStub::Generate(MacroAssembler* masm) {
+ // Try to allocate the context in new space.
+ Label gc;
+ int length = slots_ + Context::MIN_CONTEXT_SLOTS;
+ __ AllocateInNewSpace((length * kPointerSize) + FixedArray::kHeaderSize,
+ rax, rbx, rcx, &gc, TAG_OBJECT);
+
+ // Get the function from the stack.
+ __ movq(rcx, Operand(rsp, 1 * kPointerSize));
+
+ // Setup the object header.
+ __ LoadRoot(kScratchRegister, Heap::kContextMapRootIndex);
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister);
+ __ movl(FieldOperand(rax, Array::kLengthOffset), Immediate(length));
+
+ // Setup the fixed slots.
+ __ xor_(rbx, rbx); // Set to NULL.
+ __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)), rax);
+ __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rbx);
+ __ movq(Operand(rax, Context::SlotOffset(Context::EXTENSION_INDEX)), rbx);
+
+ // Copy the global object from the surrounding context.
+ __ movq(rbx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_INDEX)));
+ __ movq(Operand(rax, Context::SlotOffset(Context::GLOBAL_INDEX)), rbx);
+
+ // Initialize the rest of the slots to undefined.
+ __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
+ for (int i = Context::MIN_CONTEXT_SLOTS; i < length; i++) {
+ __ movq(Operand(rax, Context::SlotOffset(i)), rbx);
+ }
+
+ // Return and remove the on-stack parameter.
+ __ movq(rsi, rax);
+ __ ret(1 * kPointerSize);
+
+ // Need to collect. Call into runtime system.
+ __ bind(&gc);
+ __ TailCallRuntime(ExternalReference(Runtime::kNewContext), 1, 1);
+}
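
A function acquires heap slots (and hence a context allocation) when its
locals are captured by an inner closure; small contexts take this stub,
larger ones fall back to Runtime::kNewContext. A sketch of code that forces
a context:

    function counter() {
      var n = 0;                        // captured: lives in a context slot
      return function () { return ++n; };
    }
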
+
+
void ToBooleanStub::Generate(MacroAssembler* masm) {
Label false_result, true_result, not_string;
__ movq(rax, Operand(rsp, 1 * kPointerSize));
@@ -6338,7 +6298,9 @@ bool CodeGenerator::FoldConstantSmis(Token::Value op, int left, int right) {
// End of CodeGenerator implementation.
-void UnarySubStub::Generate(MacroAssembler* masm) {
+void GenericUnaryOpStub::Generate(MacroAssembler* masm) {
+ ASSERT(op_ == Token::SUB);
+
Label slow;
Label done;
Label try_float;
@@ -6406,34 +6368,39 @@ void CompareStub::Generate(MacroAssembler* masm) {
// Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
// so we do the second best thing - test it ourselves.
- Label return_equal;
- Label heap_number;
- // If it's not a heap number, then return equal.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- Factory::heap_number_map());
- __ j(equal, &heap_number);
- __ bind(&return_equal);
- __ xor_(rax, rax);
- __ ret(0);
+ if (never_nan_nan_) {
+ __ xor_(rax, rax);
+ __ ret(0);
+ } else {
+ Label return_equal;
+ Label heap_number;
+ // If it's not a heap number, then return equal.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ Factory::heap_number_map());
+ __ j(equal, &heap_number);
+ __ bind(&return_equal);
+ __ xor_(rax, rax);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only allow QNaNs, which have bit 51 set (which also rules out
- // the value being Infinity).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
- __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ xorl(rax, rax);
- __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
- __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
- __ setcc(above_equal, rax);
- __ ret(0);
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only allow QNaNs, which have bit 51 set (which also rules out
+ // the value being Infinity).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
+ __ movl(rdx, FieldOperand(rdx, HeapNumber::kExponentOffset));
+ __ xorl(rax, rax);
+ __ addl(rdx, rdx); // Shift value and mask so mask applies to top bits.
+ __ cmpl(rdx, Immediate(kQuietNaNHighBitsMask << 1));
+ __ setcc(above_equal, rax);
+ __ ret(0);
+ }
__ bind(&not_identical);
}
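
The never_nan_nan_ flag lets callers that know neither operand can be NaN
skip the QNaN check entirely; the check matters because identical NaN inputs
must still compare unequal:

    var x = NaN;
    x === x;            // false
    0 / 0 === 0 / 0;    // false
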
@@ -6580,9 +6547,11 @@ void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
__ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzxbq(scratch,
FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, Immediate(kIsSymbolMask | kIsNotStringMask));
- __ cmpb(scratch, Immediate(kSymbolTag | kStringTag));
- __ j(not_equal, label);
+ // Ensure that no non-strings have the symbol bit set.
+ ASSERT(kNotStringTag + kIsSymbolMask > LAST_TYPE);
+ ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
}
@@ -6761,16 +6730,13 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
__ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset));
__ SmiCompare(Operand(rdx, StandardFrameConstants::kContextOffset),
Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
- __ j(equal, &adaptor);
-
- // Nothing to do: The formal number of parameters has already been
- // passed in register rax by calling function. Just return it.
- __ ret(0);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it.
- __ bind(&adaptor);
- __ movq(rax, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
+ // Otherwise nothing to do: The number of formal parameters has already been
+ // passed in register rax by the calling function. Just return it.
+ __ cmovq(equal, rax,
+ Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ ret(0);
}
@@ -7885,9 +7851,52 @@ void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
int CompareStub::MinorKey() {
- // Encode the two parameters in a unique 16 bit value.
- ASSERT(static_cast<unsigned>(cc_) < (1 << 15));
- return (static_cast<unsigned>(cc_) << 1) | (strict_ ? 1 : 0);
+ // Encode the three parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ int nnn_value = (never_nan_nan_ ? 2 : 0);
+ if (cc_ != equal) nnn_value = 0; // Avoid duplicate stubs.
+ return (static_cast<unsigned>(cc_) << 2) | nnn_value | (strict_ ? 1 : 0);
+}
+
+
+const char* CompareStub::GetName() {
+ switch (cc_) {
+ case less: return "CompareStub_LT";
+ case greater: return "CompareStub_GT";
+ case less_equal: return "CompareStub_LE";
+ case greater_equal: return "CompareStub_GE";
+ case not_equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_NE_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_NE_NO_NAN";
+ } else {
+ return "CompareStub_NE";
+ }
+ }
+ }
+ case equal: {
+ if (strict_) {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_STRICT_NO_NAN";
+ } else {
+ return "CompareStub_EQ_STRICT";
+ }
+ } else {
+ if (never_nan_nan_) {
+ return "CompareStub_EQ_NO_NAN";
+ } else {
+ return "CompareStub_EQ";
+ }
+ }
+ }
+ default: return "CompareStub";
+ }
}
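
The widened key packs three fields: bit 0 is strict_, bit 1 is
never_nan_nan_ (kept only for equality, so non-equality conditions do not
produce duplicate stubs), and the condition code occupies the bits from 2
upward. A hypothetical restatement of the packing:

    function minorKey(cc, neverNanNan, strict, isEqual) {
      var nnn = (neverNanNan && isEqual) ? 2 : 0;
      return (cc << 2) | nnn | (strict ? 1 : 0);
    }
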
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index fdace8d59..fa90f0248 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -538,15 +538,18 @@ class CodeGenerator: public AstVisitor {
// Fast support for Math.random().
void GenerateRandomPositiveSmi(ZoneList<Expression*>* args);
- // Fast support for Math.sin and Math.cos.
- enum MathOp { SIN, COS };
- void GenerateFastMathOp(MathOp op, ZoneList<Expression*>* args);
- inline void GenerateMathSin(ZoneList<Expression*>* args);
- inline void GenerateMathCos(ZoneList<Expression*>* args);
-
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
+ // Fast support for SubString.
+ void GenerateSubString(ZoneList<Expression*>* args);
+
+ // Fast support for StringCompare.
+ void GenerateStringCompare(ZoneList<Expression*>* args);
+
+ // Support for direct calls from JavaScript to native RegExp code.
+ void GenerateRegExpExec(ZoneList<Expression*>* args);
+
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
diff --git a/deps/v8/src/x64/fast-codegen-x64.cc b/deps/v8/src/x64/fast-codegen-x64.cc
index 7a991cb07..3ef867802 100644
--- a/deps/v8/src/x64/fast-codegen-x64.cc
+++ b/deps/v8/src/x64/fast-codegen-x64.cc
@@ -202,180 +202,118 @@ void FastCodeGenerator::EmitReturnSequence(int position) {
}
-void FastCodeGenerator::Move(Expression::Context context, Register source) {
+void FastCodeGenerator::Apply(Expression::Context context,
+ Slot* slot,
+ Register scratch) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
- case Expression::kValue:
- __ push(source);
- break;
- case Expression::kTest:
- TestAndBranch(source, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ push(source);
- TestAndBranch(source, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ push(source);
- TestAndBranch(source, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
+ case Expression::kValue: {
+ MemOperand location = EmitSlotSearch(slot, scratch);
+ __ push(location);
break;
}
- }
-}
-
-
-template <>
-Operand FastCodeGenerator::CreateSlotOperand<Operand>(Slot* source,
- Register scratch) {
- switch (source->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- return Operand(rbp, SlotOffset(source));
- case Slot::CONTEXT: {
- int context_chain_length =
- function_->scope()->ContextChainLength(source->var()->scope());
- __ LoadContext(scratch, context_chain_length);
- return CodeGenerator::ContextOperand(scratch, source->index());
+ case Expression::kTest:
+ case Expression::kValueTest:
+ case Expression::kTestValue:
+ Move(scratch, slot);
+ Apply(context, scratch);
break;
- }
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- // Fall-through.
- default:
- UNREACHABLE();
- return Operand(rax, 0); // Dead code to make the compiler happy.
}
}
-void FastCodeGenerator::Move(Register dst, Slot* source) {
- Operand location = CreateSlotOperand<Operand>(source, dst);
- __ movq(dst, location);
-}
-
-
-void FastCodeGenerator::Move(Expression::Context context,
- Slot* source,
- Register scratch) {
+void FastCodeGenerator::Apply(Expression::Context context, Literal* lit) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
break;
- case Expression::kValue: {
- Operand location = CreateSlotOperand<Operand>(source, scratch);
- __ push(location);
+ case Expression::kValue:
+ __ Push(lit->handle());
break;
- }
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
+ case Expression::kTest:
+ case Expression::kValueTest:
case Expression::kTestValue:
- Move(scratch, source);
- Move(context, scratch);
+ __ Move(rax, lit->handle());
+ Apply(context, rax);
break;
}
}
-void FastCodeGenerator::Move(Expression::Context context, Literal* expr) {
+void FastCodeGenerator::ApplyTOS(Expression::Context context) {
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
+ __ Drop(1);
break;
case Expression::kValue:
- __ Push(expr->handle());
- break;
- case Expression::kTest: // Fall through.
- case Expression::kValueTest: // Fall through.
- case Expression::kTestValue:
- __ Move(rax, expr->handle());
- Move(context, rax);
break;
- }
-}
-
-
-void FastCodeGenerator::Move(Slot* dst,
- Register src,
- Register scratch1,
- Register scratch2) {
- switch (dst->type()) {
- case Slot::PARAMETER:
- case Slot::LOCAL:
- __ movq(Operand(rbp, SlotOffset(dst)), src);
+ case Expression::kTest:
+ __ pop(rax);
+ TestAndBranch(rax, true_label_, false_label_);
break;
- case Slot::CONTEXT: {
- ASSERT(!src.is(scratch1));
- ASSERT(!src.is(scratch2));
- ASSERT(!scratch1.is(scratch2));
- int context_chain_length =
- function_->scope()->ContextChainLength(dst->var()->scope());
- __ LoadContext(scratch1, context_chain_length);
- __ movq(Operand(scratch1, Context::SlotOffset(dst->index())), src);
- int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
- __ RecordWrite(scratch1, offset, src, scratch2);
+ case Expression::kValueTest: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, true_label_, &discard);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(false_label_);
break;
}
- case Slot::LOOKUP:
- UNIMPLEMENTED();
- default:
- UNREACHABLE();
+ case Expression::kTestValue: {
+ Label discard;
+ __ movq(rax, Operand(rsp, 0));
+ TestAndBranch(rax, &discard, false_label_);
+ __ bind(&discard);
+ __ Drop(1);
+ __ jmp(true_label_);
+ }
}
}
-void FastCodeGenerator::DropAndMove(Expression::Context context,
- Register source,
- int drop_count) {
- ASSERT(drop_count > 0);
+void FastCodeGenerator::DropAndApply(int count,
+ Expression::Context context,
+ Register reg) {
+ ASSERT(count > 0);
+ ASSERT(!reg.is(rsp));
switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- __ addq(rsp, Immediate(drop_count * kPointerSize));
+ __ Drop(count);
break;
case Expression::kValue:
- if (drop_count > 1) {
- __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
- }
- __ movq(Operand(rsp, 0), source);
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
break;
case Expression::kTest:
- ASSERT(!source.is(rsp));
- __ addq(rsp, Immediate(drop_count * kPointerSize));
- TestAndBranch(source, true_label_, false_label_);
+ __ Drop(count);
+ TestAndBranch(reg, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
- if (drop_count > 1) {
- __ addq(rsp, Immediate((drop_count - 1) * kPointerSize));
- }
- __ movq(Operand(rsp, 0), source);
- TestAndBranch(source, true_label_, &discard);
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
+ TestAndBranch(reg, true_label_, &discard);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
- __ movq(Operand(rsp, 0), source);
- TestAndBranch(source, &discard, false_label_);
+ if (count > 1) __ Drop(count - 1);
+ __ movq(Operand(rsp, 0), reg);
+ TestAndBranch(reg, &discard, false_label_);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
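
For orientation, the Expression::Context values describe what the enclosing
code wants from a subexpression: kEffect drops the result, kValue leaves it
on the stack, kTest consumes it for control flow only, and
kValueTest/kTestValue keep the value along just one branch, as short-circuit
operators require. Roughly:

    f();              // kEffect: result discarded
    var x = g();      // kValue: result kept
    if (p) {}         // kTest: only the branch matters
    var y = a || b;   // 'a': value kept only on the path where it decides y
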
@@ -383,6 +321,47 @@ void FastCodeGenerator::DropAndMove(Expression::Context context,
}
+MemOperand FastCodeGenerator::EmitSlotSearch(Slot* slot, Register scratch) {
+ switch (slot->type()) {
+ case Slot::PARAMETER:
+ case Slot::LOCAL:
+ return Operand(rbp, SlotOffset(slot));
+ case Slot::CONTEXT: {
+ int context_chain_length =
+ function_->scope()->ContextChainLength(slot->var()->scope());
+ __ LoadContext(scratch, context_chain_length);
+ return CodeGenerator::ContextOperand(scratch, slot->index());
+ }
+ case Slot::LOOKUP:
+ UNREACHABLE();
+ }
+ UNREACHABLE();
+ return Operand(rax, 0);
+}
+
+
+void FastCodeGenerator::Move(Register destination, Slot* source) {
+ MemOperand location = EmitSlotSearch(source, destination);
+ __ movq(destination, location);
+}
+
+
+void FastCodeGenerator::Move(Slot* dst,
+ Register src,
+ Register scratch1,
+ Register scratch2) {
+ ASSERT(dst->type() != Slot::LOOKUP); // Not yet implemented.
+ ASSERT(!scratch1.is(src) && !scratch2.is(src));
+ MemOperand location = EmitSlotSearch(dst, scratch1);
+ __ movq(location, src);
+ // Emit the write barrier code if the location is in the heap.
+ if (dst->type() == Slot::CONTEXT) {
+ int offset = FixedArray::kHeaderSize + dst->index() * kPointerSize;
+ __ RecordWrite(scratch1, offset, src, scratch2);
+ }
+}
+
+
void FastCodeGenerator::TestAndBranch(Register source,
Label* true_label,
Label* false_label) {
@@ -424,18 +403,21 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
if (slot != NULL) {
switch (slot->type()) {
- case Slot::PARAMETER: // Fall through.
+ case Slot::PARAMETER:
case Slot::LOCAL:
if (decl->mode() == Variable::CONST) {
__ LoadRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
- __ movq(Operand(rbp, SlotOffset(var->slot())), kScratchRegister);
+ __ movq(Operand(rbp, SlotOffset(slot)), kScratchRegister);
} else if (decl->fun() != NULL) {
Visit(decl->fun());
- __ pop(Operand(rbp, SlotOffset(var->slot())));
+ __ pop(Operand(rbp, SlotOffset(slot)));
}
break;
case Slot::CONTEXT:
+ // We bypass the general EmitSlotSearch because we know more about
+ // this specific context.
+
// The variable in the decl always resides in the current context.
ASSERT_EQ(0, function_->scope()->ContextChainLength(var->scope()));
if (FLAG_debug_code) {
@@ -509,7 +491,7 @@ void FastCodeGenerator::VisitDeclaration(Declaration* decl) {
// Value in rax is ignored (declarations are statements). Receiver
// and key on stack are discarded.
- __ addq(rsp, Immediate(2 * kPointerSize));
+ __ Drop(2);
}
}
}
@@ -539,7 +521,7 @@ void FastCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
__ push(rsi);
__ Push(boilerplate);
__ CallRuntime(Runtime::kNewClosure, 2);
- Move(expr->context(), rax);
+ Apply(expr->context(), rax);
}
@@ -566,7 +548,7 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
// is no test rax instruction here.
__ nop();
- DropAndMove(context, rax);
+ DropAndApply(1, context, rax);
} else if (rewrite->AsSlot() != NULL) {
Slot* slot = rewrite->AsSlot();
if (FLAG_debug_code) {
@@ -583,45 +565,45 @@ void FastCodeGenerator::EmitVariableLoad(Variable* var,
case Slot::LOOKUP:
UNIMPLEMENTED();
break;
- default:
- UNREACHABLE();
}
}
- Move(context, slot, rax);
+ Apply(context, slot, rax);
} else {
- // A variable has been rewritten into an explicit access to
- // an object property.
+ Comment cmnt(masm_, "Variable rewritten to property");
+ // A variable has been rewritten into an explicit access to an object
+ // property.
Property* property = rewrite->AsProperty();
ASSERT_NOT_NULL(property);
- // Currently the only parameter expressions that can occur are
- // on the form "slot[literal]".
+ // The only property expressions that can occur are of the form
+ // "slot[literal]".
- // Check that the object is in a slot.
+ // Assert that the object is in a slot.
Variable* object = property->obj()->AsVariableProxy()->AsVariable();
ASSERT_NOT_NULL(object);
Slot* object_slot = object->slot();
ASSERT_NOT_NULL(object_slot);
// Load the object.
- Move(Expression::kValue, object_slot, rax);
+ MemOperand object_loc = EmitSlotSearch(object_slot, rax);
+ __ push(object_loc);
- // Check that the key is a smi.
+ // Assert that the key is a smi.
Literal* key_literal = property->key()->AsLiteral();
ASSERT_NOT_NULL(key_literal);
ASSERT(key_literal->handle()->IsSmi());
// Load the key.
- Move(Expression::kValue, key_literal);
+ __ Push(key_literal->handle());
- // Do a KEYED property load.
+ // Do a keyed property load.
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
+ // Notice: We must not have a "test rax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
// Drop key and object left on the stack by IC, and push the result.
- DropAndMove(context, rax, 2);
+ DropAndApply(2, context, rax);
}
}
@@ -649,7 +631,7 @@ void FastCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
// Label done:
__ bind(&done);
- Move(expr->context(), rax);
+ Apply(expr->context(), rax);
}
@@ -681,7 +663,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
result_saved = true;
}
switch (property->kind()) {
- case ObjectLiteral::Property::MATERIALIZED_LITERAL: // fall through
+ case ObjectLiteral::Property::MATERIALIZED_LITERAL:
ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
case ObjectLiteral::Property::COMPUTED:
if (key->handle()->IsSymbol()) {
@@ -695,7 +677,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ movq(rax, Operand(rsp, 0)); // Restore result back into rax.
break;
}
- // fall through
+ // Fall through.
case ObjectLiteral::Property::PROTOTYPE:
__ push(rax);
Visit(key);
@@ -705,7 +687,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ CallRuntime(Runtime::kSetProperty, 3);
__ movq(rax, Operand(rsp, 0)); // Restore result into rax.
break;
- case ObjectLiteral::Property::SETTER: // fall through
+ case ObjectLiteral::Property::SETTER:
case ObjectLiteral::Property::GETTER:
__ push(rax);
Visit(key);
@@ -725,7 +707,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(rax);
@@ -739,7 +721,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(rax);
TestAndBranch(rax, true_label_, &discard);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -748,7 +730,7 @@ void FastCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (!result_saved) __ push(rax);
TestAndBranch(rax, &discard, false_label_);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -761,7 +743,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(expr->literal_index()));
- __ Push(expr->literals());
+ __ Push(expr->constant_elements());
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else {
@@ -804,7 +786,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
- if (result_saved) __ addq(rsp, Immediate(kPointerSize));
+ if (result_saved) __ Drop(1);
break;
case Expression::kValue:
if (!result_saved) __ push(rax);
@@ -818,7 +800,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(rax);
TestAndBranch(rax, true_label_, &discard);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
@@ -827,7 +809,7 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (!result_saved) __ push(rax);
TestAndBranch(rax, &discard, false_label_);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -837,18 +819,21 @@ void FastCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
void FastCodeGenerator::EmitNamedPropertyLoad(Property* prop,
Expression::Context context) {
+ SetSourcePosition(prop->position());
Literal* key = prop->key()->AsLiteral();
__ Move(rcx, key->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, rax);
+ Apply(context, rax);
}
-void FastCodeGenerator::EmitKeyedPropertyLoad(Expression::Context context) {
+void FastCodeGenerator::EmitKeyedPropertyLoad(Property* prop,
+ Expression::Context context) {
+ SetSourcePosition(prop->position());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
- Move(context, rax);
+ Apply(context, rax);
}
@@ -858,12 +843,12 @@ void FastCodeGenerator::EmitCompoundAssignmentOp(Token::Value op,
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
- Move(context, rax);
+ Apply(context, rax);
}
-void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
- Variable* var = expr->target()->AsVariableProxy()->AsVariable();
+void FastCodeGenerator::EmitVariableAssignment(Variable* var,
+ Expression::Context context) {
ASSERT(var != NULL);
ASSERT(var->is_global() || var->slot() != NULL);
if (var->is_global()) {
@@ -876,49 +861,49 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
__ Call(ic, RelocInfo::CODE_TARGET);
// Overwrite the global object on the stack with the result if needed.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, context, rax);
- } else if (var->slot()) {
+ } else if (var->slot() != NULL) {
Slot* slot = var->slot();
- ASSERT_NOT_NULL(slot); // Variables rewritten as properties not handled.
switch (slot->type()) {
case Slot::LOCAL:
case Slot::PARAMETER: {
- switch (expr->context()) {
+ Operand target = Operand(rbp, SlotOffset(slot));
+ switch (context) {
case Expression::kUninitialized:
UNREACHABLE();
case Expression::kEffect:
// Perform assignment and discard value.
- __ pop(Operand(rbp, SlotOffset(var->slot())));
+ __ pop(target);
break;
case Expression::kValue:
// Perform assignment and preserve value.
__ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ __ movq(target, rax);
break;
case Expression::kTest:
// Perform assignment and test (and discard) value.
__ pop(rax);
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ __ movq(target, rax);
TestAndBranch(rax, true_label_, false_label_);
break;
case Expression::kValueTest: {
Label discard;
__ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ __ movq(target, rax);
TestAndBranch(rax, true_label_, &discard);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(false_label_);
break;
}
case Expression::kTestValue: {
Label discard;
__ movq(rax, Operand(rsp, 0));
- __ movq(Operand(rbp, SlotOffset(var->slot())), rax);
+ __ movq(target, rax);
TestAndBranch(rax, &discard, false_label_);
__ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
__ jmp(true_label_);
break;
}
@@ -927,41 +912,20 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
}
case Slot::CONTEXT: {
- int chain_length =
- function_->scope()->ContextChainLength(slot->var()->scope());
- if (chain_length > 0) {
- // Move up the context chain to the context containing the slot.
- __ movq(rax,
- Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
- // Load the function context (which is the incoming, outer context).
- __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
- for (int i = 1; i < chain_length; i++) {
- __ movq(rax,
- Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)));
- __ movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
- }
- } else { // Slot is in the current context. Generate optimized code.
- __ movq(rax, rsi); // RecordWrite destroys the object register.
- }
- if (FLAG_debug_code) {
- __ cmpq(rax,
- Operand(rax, Context::SlotOffset(Context::FCONTEXT_INDEX)));
- __ Check(equal, "Context Slot chain length wrong.");
- }
- __ pop(rcx);
- __ movq(Operand(rax, Context::SlotOffset(slot->index())), rcx);
+ MemOperand target = EmitSlotSearch(slot, rcx);
+ __ pop(rax);
+ __ movq(target, rax);
// RecordWrite may destroy all its register arguments.
- if (expr->context() == Expression::kValue) {
- __ push(rcx);
- } else if (expr->context() != Expression::kEffect) {
- __ movq(rdx, rcx);
+ if (context == Expression::kValue) {
+ __ push(rax);
+ } else if (context != Expression::kEffect) {
+ __ movq(rdx, rax);
}
int offset = FixedArray::kHeaderSize + slot->index() * kPointerSize;
- __ RecordWrite(rax, offset, rcx, rbx);
- if (expr->context() != Expression::kEffect &&
- expr->context() != Expression::kValue) {
- Move(expr->context(), rdx);
+ __ RecordWrite(rcx, offset, rax, rbx);
+ if (context != Expression::kEffect && context != Expression::kValue) {
+ Apply(context, rdx);
}
break;
}
@@ -970,6 +934,10 @@ void FastCodeGenerator::EmitVariableAssignment(Assignment* expr) {
UNREACHABLE();
break;
}
+ } else {
+ // Variables rewritten as properties are not treated as variables in
+ // assignments.
+ UNREACHABLE();
}
}
@@ -1001,7 +969,7 @@ void FastCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
__ pop(rax);
}
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, expr->context(), rax);
}
@@ -1034,15 +1002,13 @@ void FastCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
}
// Receiver and key are still on stack.
- __ addq(rsp, Immediate(2 * kPointerSize));
- Move(expr->context(), rax);
+ DropAndApply(2, expr->context(), rax);
}
void FastCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
Expression* key = expr->key();
- uint32_t dummy;
// Record the source position for the property load.
SetSourcePosition(expr->position());
@@ -1050,29 +1016,27 @@ void FastCodeGenerator::VisitProperty(Property* expr) {
// Evaluate receiver.
Visit(expr->obj());
-
- if (key->AsLiteral() != NULL && key->AsLiteral()->handle()->IsSymbol() &&
- !String::cast(*(key->AsLiteral()->handle()))->AsArrayIndex(&dummy)) {
- // Do a NAMED property load.
- // The IC expects the property name in rcx and the receiver on the stack.
+ if (key->IsPropertyName()) {
+ // Do a named property load. The IC expects the property name in rcx
+ // and the receiver on the stack.
__ Move(rcx, key->AsLiteral()->handle());
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
    // By emitting a nop we make sure that we do not have a "test rax,..."
    // instruction after the call, as it is treated specially by the LoadIC code.
__ nop();
+ DropAndApply(1, expr->context(), rax);
} else {
- // Do a KEYED property load.
+ // Do a keyed property load.
Visit(expr->key());
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
__ call(ic, RelocInfo::CODE_TARGET);
- // Notice: We must not have a "test rax, ..." instruction after
- // the call. It is treated specially by the LoadIC code.
-
- // Drop key left on the stack by IC.
- __ addq(rsp, Immediate(kPointerSize));
+ // Notice: We must not have a "test rax, ..." instruction after the
+ // call. It is treated specially by the LoadIC code.
+ __ nop();
+ // Drop key and receiver left on the stack by IC.
+ DropAndApply(2, expr->context(), rax);
}
- DropAndMove(expr->context(), rax);
}
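
For readers new to the IC protocol: the nop after the call is load-bearing. The IC
inspects the instruction that follows its call site; a "test rax" there marks a
patchable inlined load, while a nop says there is nothing to patch. A sketch of the
two call-site shapes (the testq operand is illustrative, not from this patch):

    __ call(ic, RelocInfo::CODE_TARGET);
    __ nop();                           // signals: no inlined code at this site
    // versus the patchable form emitted by the optimizing codegen:
    __ call(ic, RelocInfo::CODE_TARGET);
    __ testq(rax, Immediate(delta));    // delta locates the inlined load to patch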
@@ -1095,7 +1059,7 @@ void FastCodeGenerator::EmitCallWithIC(Call* expr,
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, expr->context(), rax);
}
@@ -1113,7 +1077,7 @@ void FastCodeGenerator::EmitCallWithStub(Call* expr) {
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, expr->context(), rax);
}
@@ -1157,14 +1121,15 @@ void FastCodeGenerator::VisitCall(Call* expr) {
    // instruction after the call, as it is treated specially by the LoadIC code.
__ nop();
// Drop key left on the stack by IC.
- __ addq(rsp, Immediate(kPointerSize));
+ __ Drop(1);
// Pop receiver.
__ pop(rbx);
// Push result (function).
__ push(rax);
// Push receiver object on stack.
if (prop->is_synthetic()) {
- __ push(CodeGenerator::GlobalObject());
+ __ movq(rcx, CodeGenerator::GlobalObject());
+ __ push(FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
} else {
__ push(rbx);
}
@@ -1226,7 +1191,7 @@ void FastCodeGenerator::VisitCallNew(CallNew* expr) {
__ Call(construct_builtin, RelocInfo::CONSTRUCT_CALL);
// Replace function on TOS with result in rax, or pop it.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, expr->context(), rax);
}
@@ -1256,82 +1221,10 @@ void FastCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Restore context register.
__ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- DropAndMove(expr->context(), rax);
+ DropAndApply(1, expr->context(), rax);
} else {
__ CallRuntime(expr->function(), arg_count);
- Move(expr->context(), rax);
- }
-}
-
-void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
- Comment cmnt(masm_, "[ CountOperation");
- VariableProxy* proxy = expr->expression()->AsVariableProxy();
- ASSERT(proxy->AsVariable() != NULL);
- ASSERT(proxy->AsVariable()->is_global());
-
- Visit(proxy);
- __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kValue: // Fall through
- case Expression::kTest: // Fall through
- case Expression::kTestValue: // Fall through
- case Expression::kValueTest:
- // Duplicate the result on the stack.
- __ push(rax);
- break;
- case Expression::kEffect:
- // Do not save result.
- break;
- }
- // Call runtime for +1/-1.
- __ push(rax);
- __ Push(Smi::FromInt(1));
- if (expr->op() == Token::INC) {
- __ CallRuntime(Runtime::kNumberAdd, 2);
- } else {
- __ CallRuntime(Runtime::kNumberSub, 2);
- }
- // Call Store IC.
- __ Move(rcx, proxy->AsVariable()->name());
- __ push(CodeGenerator::GlobalObject());
- Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
- __ call(ic, RelocInfo::CODE_TARGET);
- // Restore up stack after store IC
- __ addq(rsp, Immediate(kPointerSize));
-
- switch (expr->context()) {
- case Expression::kUninitialized:
- UNREACHABLE();
- case Expression::kEffect: // Fall through
- case Expression::kValue:
- // Do nothing. Result in either on the stack for value context
- // or discarded for effect context.
- break;
- case Expression::kTest:
- __ pop(rax);
- TestAndBranch(rax, true_label_, false_label_);
- break;
- case Expression::kValueTest: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, true_label_, &discard);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(false_label_);
- break;
- }
- case Expression::kTestValue: {
- Label discard;
- __ movq(rax, Operand(rsp, 0));
- TestAndBranch(rax, &discard, false_label_);
- __ bind(&discard);
- __ addq(rsp, Immediate(kPointerSize));
- __ jmp(true_label_);
- break;
- }
+ Apply(expr->context(), rax);
}
}
@@ -1355,7 +1248,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// Value is false so it's needed.
__ PushRoot(Heap::kUndefinedValueRootIndex);
// Fall through.
- case Expression::kTest: // Fall through.
+ case Expression::kTest:
case Expression::kValueTest:
__ jmp(false_label_);
break;
@@ -1367,20 +1260,14 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
Comment cmnt(masm_, "[ UnaryOperation (NOT)");
ASSERT_EQ(Expression::kTest, expr->expression()->context());
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ Label push_true, push_false, done;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
case Expression::kValue:
- true_label_ = &push_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, &push_true);
__ bind(&push_true);
__ PushRoot(Heap::kTrueValueRootIndex);
__ jmp(&done);
@@ -1390,38 +1277,28 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
break;
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &done, &done);
__ bind(&done);
break;
case Expression::kTest:
- true_label_ = saved_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, true_label_);
break;
case Expression::kValueTest:
- true_label_ = saved_false;
- false_label_ = &push_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), false_label_, &push_true);
__ bind(&push_true);
__ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
- true_label_ = &push_false;
- false_label_ = saved_true;
- Visit(expr->expression());
+ VisitForControl(expr->expression(), &push_false, true_label_);
__ bind(&push_false);
__ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
break;
}
@@ -1454,7 +1331,7 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
__ CallRuntime(Runtime::kTypeof, 1);
- Move(expr->context(), rax);
+ Apply(expr->context(), rax);
break;
}
@@ -1464,6 +1341,139 @@ void FastCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
+void FastCodeGenerator::VisitCountOperation(CountOperation* expr) {
+ Comment cmnt(masm_, "[ CountOperation");
+
+  // Expression can only be a property, a global, or a (parameter or local)
+  // slot. Variables rewritten to '.arguments' are treated as KEYED_PROPERTY.
+ enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+ LhsKind assign_type = VARIABLE;
+ Property* prop = expr->expression()->AsProperty();
+  // In the case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+ if (prop != NULL) {
+ assign_type = (prop->key()->context() == Expression::kUninitialized)
+ ? NAMED_PROPERTY
+ : KEYED_PROPERTY;
+ }
+
+ // Evaluate expression and get value.
+ if (assign_type == VARIABLE) {
+ ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+ EmitVariableLoad(expr->expression()->AsVariableProxy()->var(),
+ Expression::kValue);
+ } else {
+ // Reserve space for result of postfix operation.
+ if (expr->is_postfix() && expr->context() != Expression::kEffect) {
+ ASSERT(expr->context() != Expression::kUninitialized);
+ __ Push(Smi::FromInt(0));
+ }
+ Visit(prop->obj());
+ ASSERT_EQ(Expression::kValue, prop->obj()->context());
+ if (assign_type == NAMED_PROPERTY) {
+ EmitNamedPropertyLoad(prop, Expression::kValue);
+ } else {
+ Visit(prop->key());
+ ASSERT_EQ(Expression::kValue, prop->key()->context());
+ EmitKeyedPropertyLoad(prop, Expression::kValue);
+ }
+ }
+
+ // Convert to number.
+ __ InvokeBuiltin(Builtins::TO_NUMBER, CALL_FUNCTION);
+
+ // Save result for postfix expressions.
+ if (expr->is_postfix()) {
+ switch (expr->context()) {
+ case Expression::kUninitialized:
+ UNREACHABLE();
+ case Expression::kEffect:
+ // Do not save result.
+ break;
+ case Expression::kValue: // Fall through
+ case Expression::kTest: // Fall through
+ case Expression::kTestValue: // Fall through
+ case Expression::kValueTest:
+        // Save the result on the stack. For a named property we store the
+        // result under the receiver on top of the stack; for a keyed property
+        // it goes under both the key and the receiver.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ break;
+ case NAMED_PROPERTY:
+ __ movq(Operand(rsp, kPointerSize), rax);
+ break;
+ case KEYED_PROPERTY:
+ __ movq(Operand(rsp, 2 * kPointerSize), rax);
+ break;
+ }
+ break;
+ }
+ }
+
+ // Call runtime for +1/-1.
+ __ push(rax);
+ __ Push(Smi::FromInt(1));
+ if (expr->op() == Token::INC) {
+ __ CallRuntime(Runtime::kNumberAdd, 2);
+ } else {
+ __ CallRuntime(Runtime::kNumberSub, 2);
+ }
+
+ // Store the value returned in rax.
+ switch (assign_type) {
+ case VARIABLE:
+ __ push(rax);
+ if (expr->is_postfix()) {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ Expression::kEffect);
+        // For all contexts except kEffect, the result is now on top of
+        // the stack.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+ expr->context());
+ }
+ break;
+ case NAMED_PROPERTY: {
+ __ Move(rcx, prop->key()->AsLiteral()->handle());
+ Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(1); // Result is on the stack under the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(1, expr->context(), rax);
+ }
+ break;
+ }
+ case KEYED_PROPERTY: {
+ Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ // This nop signals to the IC that there is no inlined code at the call
+ // site for it to patch.
+ __ nop();
+ if (expr->is_postfix()) {
+ __ Drop(2); // Result is on the stack under the key and the receiver.
+ if (expr->context() != Expression::kEffect) {
+ ApplyTOS(expr->context());
+ }
+ } else {
+ DropAndApply(2, expr->context(), rax);
+ }
+ break;
+ }
+ }
+}
+
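
For orientation, a sketch of the stack during a postfix obj[key]++ in a value
context, as arranged by VisitCountOperation above:

    // [rsp + 2 * kPointerSize]  placeholder (Smi 0, later the old value)
    // [rsp + 1 * kPointerSize]  receiver (obj)
    // [rsp + 0]                 key
    // The saved old value is written via movq(Operand(rsp, 2 * kPointerSize), rax);
    // after the KeyedStoreIC returns, Drop(2) pops key and receiver and the
    // old value is left on top for ApplyTOS.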
void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
Comment cmnt(masm_, "[ BinaryOperation");
switch (expr->op()) {
@@ -1499,7 +1509,7 @@ void FastCodeGenerator::VisitBinaryOperation(BinaryOperation* expr) {
NO_OVERWRITE,
NO_GENERIC_BINARY_FLAGS);
__ CallStub(&stub);
- Move(expr->context(), rax);
+ Apply(expr->context(), rax);
break;
}
@@ -1516,46 +1526,40 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Visit(expr->left());
Visit(expr->right());
- // Convert current context to test context: Pre-test code.
- Label push_true;
- Label push_false;
- Label done;
- Label* saved_true = true_label_;
- Label* saved_false = false_label_;
+ // Always perform the comparison for its control flow. Pack the result
+ // into the expression's context after the comparison is performed.
+ Label push_true, push_false, done;
+ // Initially assume we are in a test context.
+ Label* if_true = true_label_;
+ Label* if_false = false_label_;
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
-
case Expression::kValue:
- true_label_ = &push_true;
- false_label_ = &push_false;
+ if_true = &push_true;
+ if_false = &push_false;
break;
-
case Expression::kEffect:
- true_label_ = &done;
- false_label_ = &done;
+ if_true = &done;
+ if_false = &done;
break;
-
case Expression::kTest:
break;
-
case Expression::kValueTest:
- true_label_ = &push_true;
+ if_true = &push_true;
break;
-
case Expression::kTestValue:
- false_label_ = &push_false;
+ if_false = &push_false;
break;
}
- // Convert current context to test context: End pre-test code.
switch (expr->op()) {
case Token::IN: {
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
__ CompareRoot(rax, Heap::kTrueValueRootIndex);
- __ j(equal, true_label_);
- __ jmp(false_label_);
+ __ j(equal, if_true);
+ __ jmp(if_false);
break;
}
@@ -1563,8 +1567,8 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
InstanceofStub stub;
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(zero, true_label_); // The stub returns 0 for true.
- __ jmp(false_label_);
+ __ j(zero, if_true); // The stub returns 0 for true.
+ __ jmp(if_false);
break;
}
@@ -1574,7 +1578,7 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
switch (expr->op()) {
case Token::EQ_STRICT:
strict = true;
- // Fall through
+ // Fall through.
case Token::EQ:
cc = equal;
__ pop(rax);
@@ -1613,24 +1617,29 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label slow_case;
__ JumpIfNotBothSmi(rax, rdx, &slow_case);
__ SmiCompare(rdx, rax);
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
__ bind(&slow_case);
CompareStub stub(cc, strict);
__ CallStub(&stub);
__ testq(rax, rax);
- __ j(cc, true_label_);
- __ jmp(false_label_);
+ __ j(cc, if_true);
+ __ jmp(if_false);
}
}
- // Convert current context to test context: Post-test code.
+ // Convert the result of the comparison into one expected for this
+ // expression's context.
switch (expr->context()) {
case Expression::kUninitialized:
UNREACHABLE();
break;
+ case Expression::kEffect:
+ __ bind(&done);
+ break;
+
case Expression::kValue:
__ bind(&push_true);
__ PushRoot(Heap::kTrueValueRootIndex);
@@ -1640,34 +1649,27 @@ void FastCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ bind(&done);
break;
- case Expression::kEffect:
- __ bind(&done);
- break;
-
case Expression::kTest:
break;
case Expression::kValueTest:
__ bind(&push_true);
__ PushRoot(Heap::kTrueValueRootIndex);
- __ jmp(saved_true);
+ __ jmp(true_label_);
break;
case Expression::kTestValue:
__ bind(&push_false);
__ PushRoot(Heap::kFalseValueRootIndex);
- __ jmp(saved_false);
+ __ jmp(false_label_);
break;
}
- true_label_ = saved_true;
- false_label_ = saved_false;
- // Convert current context to test context: End post-test code.
}
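
The if_true/if_false redirection above amounts to this target table (a summary
of the switch statements, not new behavior):

    //   context       if_true        if_false
    //   kValue        &push_true     &push_false
    //   kEffect       &done          &done
    //   kTest         true_label_    false_label_
    //   kValueTest    &push_true     false_label_
    //   kTestValue    true_label_    &push_false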
void FastCodeGenerator::VisitThisFunction(ThisFunction* expr) {
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- Move(expr->context(), rax);
+ Apply(expr->context(), rax);
}
@@ -1722,11 +1724,6 @@ void FastCodeGenerator::ExitFinallyBlock() {
}
-void FastCodeGenerator::ThrowException() {
- __ push(result_register());
- __ CallRuntime(Runtime::kThrow, 1);
-}
-
#undef __
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index e24007559..a0f87ad4a 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -330,6 +330,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Is the string a symbol?
__ j(not_zero, &index_string); // The value in rbx is used at jump target.
+ ASSERT(kSymbolTag != 0);
__ testb(FieldOperand(rdx, Map::kInstanceTypeOffset),
Immediate(kIsSymbolMask));
__ j(zero, &slow);
@@ -408,6 +409,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
}
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+ // ----------- S t a t e -------------
+ // -- rsp[0] : return address
+ // -- rsp[8] : name
+ // -- rsp[16] : receiver
+ // -----------------------------------
+ GenerateGeneric(masm);
+}
+
+
void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm,
ExternalArrayType array_type) {
// ----------- S t a t e -------------
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 5cf09f267..dc77c8948 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2374,7 +2374,7 @@ void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
// Move up the chain of contexts to the context containing the slot.
movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
// Load the function context (which is the incoming, outer context).
- movq(rax, FieldOperand(rax, JSFunction::kContextOffset));
+ movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
for (int i = 1; i < context_chain_length; i++) {
movq(dst, Operand(dst, Context::SlotOffset(Context::CLOSURE_INDEX)));
movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
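
The one-line fix matters whenever dst is not rax: the old code read and wrote rax
instead of dst, leaving dst holding the closure rather than its function context.
The corrected walk for one link of the chain, in isolation (sketch):

    // Closure slot of the current context, then that closure's context.
    movq(dst, Operand(rsi, Context::SlotOffset(Context::CLOSURE_INDEX)));
    movq(dst, FieldOperand(dst, JSFunction::kContextOffset));
    // ...and one more such pair per remaining link in the chain.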
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 97200051a..37f96a66f 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -38,6 +38,9 @@ namespace internal {
// function calling convention.
static const Register kScratchRegister = r10;
+// Convenience for platform-independent signatures.
+typedef Operand MemOperand;
+
// Forward declaration.
class JumpTarget;
@@ -412,7 +415,11 @@ class MacroAssembler: public Assembler {
void Cmp(Register dst, Handle<Object> source);
void Cmp(const Operand& dst, Handle<Object> source);
void Push(Handle<Object> source);
+
+ // Emit code to discard a non-negative number of pointer-sized elements
+ // from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
+
void Call(Label* target) { call(target); }
// Control Flow
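
A plausible implementation of Drop, consistent with the addq-on-rsp call sites it
replaces throughout this patch (a sketch, not the verbatim source):

    void MacroAssembler::Drop(int stack_elements) {
      ASSERT(stack_elements >= 0);
      if (stack_elements > 0) {
        addq(rsp, Immediate(stack_elements * kPointerSize));
      }
    }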
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 639f5e95b..09cb9177a 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -60,20 +60,24 @@ namespace internal {
* - r8 : code object pointer. Used to convert between absolute and
* code-object-relative addresses.
*
- * The registers rax, rbx, rcx, r9 and r11 are free to use for computations.
+ * The registers rax, rbx, r9 and r11 are free to use for computations.
* If changed to use r12+, they should be saved as callee-save registers.
*
* Each call to a C++ method should retain these registers.
*
* The stack will have the following content, in some order, indexable from the
* frame pointer (see, e.g., kStackHighEnd):
- * - stack_area_base (High end of the memory area to use as
- * backtracking stack)
- * - at_start (if 1, start at start of string, if 0, don't)
- * - int* capture_array (int[num_saved_registers_], for output).
- * - end of input (Address of end of string)
- * - start of input (Address of first character in string)
- * - String** input_string (location of a handle containing the string)
+ * - direct_call (if 1, direct call from JavaScript code, if 0 call
+ * through the runtime system)
+ * - stack_area_base (High end of the memory area to use as
+ * backtracking stack)
+ * - at_start (if 1, we are starting at the start of the
+ * string, otherwise 0)
+ * - int* capture_array (int[num_saved_registers_], for output).
+ * - end of input (Address of end of string)
+ * - start of input (Address of first character in string)
+ * - start index (character index of start)
+ * - String* input_string (input string)
* - return address
* - backup of callee save registers (rbx, possibly rsi and rdi).
* - Offset of location before start of input (effectively character
@@ -90,11 +94,13 @@ namespace internal {
* calling the code's entry address cast to a function pointer with the
* following signature:
* int (*match)(String* input_string,
+ * int start_index,
* Address start,
* Address end,
* int* capture_output_array,
* bool at_start,
- * byte* stack_area_base)
+ * byte* stack_area_base,
+ * bool direct_call)
*/
#define __ ACCESS_MASM(masm_)
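
Taken together, the entry point now has this shape (the typedef name is an
assumption; the parameter list mirrors the signature comment above):

    typedef int (*RegExpEntry)(String* input_string,
                               int start_index,
                               Address start,
                               Address end,
                               int* capture_output_array,
                               bool at_start,
                               byte* stack_area_base,
                               bool direct_call);
    // Invoked through the simulator-aware macro, now with eight arguments:
    // CALL_GENERATED_REGEXP_CODE(entry, input_string, start_index, start, end,
    //                            capture_output_array, at_start,
    //                            stack_area_base, direct_call);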
@@ -490,27 +496,22 @@ void RegExpMacroAssemblerX64::CheckNotCharacterAfterMinusAnd(
bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match) {
// Range checks (c in min..max) are generally implemented by an unsigned
- // (c - min) <= (max - min) check
+ // (c - min) <= (max - min) check, using the sequence:
+ // lea(rax, Operand(current_character(), -min)) or sub(rax, Immediate(min))
+ // cmp(rax, Immediate(max - min))
switch (type) {
case 's':
// Match space-characters
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
Label success;
__ cmpl(current_character(), Immediate(' '));
__ j(equal, &success);
// Check range 0x09..0x0d
- __ subl(current_character(), Immediate('\t'));
- __ cmpl(current_character(), Immediate('\r' - '\t'));
+ __ lea(rax, Operand(current_character(), -'\t'));
+ __ cmpl(rax, Immediate('\r' - '\t'));
BranchOrBacktrack(above, on_no_match);
__ bind(&success);
return true;
@@ -518,72 +519,116 @@ bool RegExpMacroAssemblerX64::CheckSpecialCharacterClass(uc16 type,
return false;
case 'S':
// Match non-space characters.
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
if (mode_ == ASCII) {
// ASCII space characters are '\t'..'\r' and ' '.
__ cmpl(current_character(), Immediate(' '));
BranchOrBacktrack(equal, on_no_match);
- __ subl(current_character(), Immediate('\t'));
- __ cmpl(current_character(), Immediate('\r' - '\t'));
+ __ lea(rax, Operand(current_character(), -'\t'));
+ __ cmpl(rax, Immediate('\r' - '\t'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
}
return false;
case 'd':
// Match ASCII digits ('0'..'9')
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ subl(current_character(), Immediate('0'));
- __ cmpl(current_character(), Immediate('9' - '0'));
+ __ lea(rax, Operand(current_character(), -'0'));
+ __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(above, on_no_match);
return true;
case 'D':
// Match non ASCII-digits
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ subl(current_character(), Immediate('0'));
- __ cmpl(current_character(), Immediate('9' - '0'));
+ __ lea(rax, Operand(current_character(), -'0'));
+ __ cmpl(rax, Immediate('9' - '0'));
BranchOrBacktrack(below_equal, on_no_match);
return true;
case '.': {
// Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
- if (check_offset) {
- LoadCurrentCharacter(cp_offset, on_no_match, 1);
- } else {
- LoadCurrentCharacterUnchecked(cp_offset, 1);
- }
- __ xor_(current_character(), Immediate(0x01));
+ __ movl(rax, current_character());
+ __ xor_(rax, Immediate(0x01));
// See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
- __ subl(current_character(), Immediate(0x0b));
- __ cmpl(current_character(), Immediate(0x0c - 0x0b));
+ __ subl(rax, Immediate(0x0b));
+ __ cmpl(rax, Immediate(0x0c - 0x0b));
BranchOrBacktrack(below_equal, on_no_match);
if (mode_ == UC16) {
// Compare original value to 0x2028 and 0x2029, using the already
// computed (current_char ^ 0x01 - 0x0b). I.e., check for
// 0x201d (0x2028 - 0x0b) or 0x201e.
- __ subl(current_character(), Immediate(0x2028 - 0x0b));
- __ cmpl(current_character(), Immediate(1));
+ __ subl(rax, Immediate(0x2028 - 0x0b));
+ __ cmpl(rax, Immediate(0x2029 - 0x2028));
BranchOrBacktrack(below_equal, on_no_match);
}
return true;
}
+ case 'n': {
+ // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+ __ movl(rax, current_character());
+ __ xor_(rax, Immediate(0x01));
+ // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+ __ subl(rax, Immediate(0x0b));
+ __ cmpl(rax, Immediate(0x0c - 0x0b));
+ if (mode_ == ASCII) {
+ BranchOrBacktrack(above, on_no_match);
+ } else {
+ Label done;
+ BranchOrBacktrack(below_equal, &done);
+ // Compare original value to 0x2028 and 0x2029, using the already
+ // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+ // 0x201d (0x2028 - 0x0b) or 0x201e.
+ __ subl(rax, Immediate(0x2028 - 0x0b));
+ __ cmpl(rax, Immediate(0x2029 - 0x2028));
+ BranchOrBacktrack(above, on_no_match);
+ __ bind(&done);
+ }
+ return true;
+ }
+ case 'w': {
+ Label done, check_digits;
+ __ cmpl(current_character(), Immediate('9'));
+ __ j(less_equal, &check_digits);
+ __ cmpl(current_character(), Immediate('_'));
+ __ j(equal, &done);
+ // Convert to lower case if letter.
+ __ movl(rax, current_character());
+ __ orl(rax, Immediate(0x20));
+      // Check rax in range ['a'..'z'].
+ __ subl(rax, Immediate('a'));
+ __ cmpl(rax, Immediate('z' - 'a'));
+ BranchOrBacktrack(above, on_no_match);
+ __ jmp(&done);
+ __ bind(&check_digits);
+ // Check current character in range ['0'..'9'].
+ __ cmpl(current_character(), Immediate('0'));
+ BranchOrBacktrack(below, on_no_match);
+ __ bind(&done);
+
+ return true;
+ }
+ case 'W': {
+ Label done, check_digits;
+ __ cmpl(current_character(), Immediate('9'));
+ __ j(less_equal, &check_digits);
+ __ cmpl(current_character(), Immediate('_'));
+ BranchOrBacktrack(equal, on_no_match);
+ // Convert to lower case if letter.
+ __ movl(rax, current_character());
+ __ orl(rax, Immediate(0x20));
+      // Check current character in range ['a'..'z'], nondestructively.
+ __ subl(rax, Immediate('a'));
+ __ cmpl(rax, Immediate('z' - 'a'));
+ BranchOrBacktrack(below_equal, on_no_match);
+ __ jmp(&done);
+ __ bind(&check_digits);
+ // Check current character in range ['0'..'9'].
+ __ cmpl(current_character(), Immediate('0'));
+ BranchOrBacktrack(above_equal, on_no_match);
+ __ bind(&done);
+
+ return true;
+ }
case '*':
// Match any character.
- if (check_offset) {
- CheckPosition(cp_offset, on_no_match);
- }
return true;
- // No custom implementation (yet): w, W, s(UC16), S(UC16).
+ // No custom implementation (yet): s(UC16), S(UC16).
default:
return false;
}
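
The unsigned range-check idiom used throughout this function, in isolation (a
minimal sketch; unsigned wraparound folds both bound checks into one compare):

    static inline bool InRange(unsigned c, unsigned min, unsigned max) {
      // If c < min, c - min wraps around to a huge value and fails the test.
      return (c - min) <= (max - min);
    }
    // E.g. matching '\d' becomes one lea/cmp pair: InRange(c, '0', '9').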
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.h b/deps/v8/src/x64/regexp-macro-assembler-x64.h
index 3e6720dc0..694cba003 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.h
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.h
@@ -73,8 +73,6 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// the end of the string.
virtual void CheckPosition(int cp_offset, Label* on_outside_input);
virtual bool CheckSpecialCharacterClass(uc16 type,
- int cp_offset,
- bool check_offset,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
@@ -143,6 +141,8 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
// AtStart is passed as 32 bit int (values 0 or 1).
static const int kAtStart = kRegisterOutput + kPointerSize;
static const int kStackHighEnd = kAtStart + kPointerSize;
+ // DirectCall is passed as 32 bit int (values 0 or 1).
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
#else
// In AMD64 ABI Calling Convention, the first six integer parameters
// are passed as registers, and caller must allocate space on the stack
@@ -154,6 +154,7 @@ class RegExpMacroAssemblerX64: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kInputEnd - kPointerSize;
static const int kAtStart = kRegisterOutput - kPointerSize;
static const int kStackHighEnd = kFrameAlign;
+ static const int kDirectCall = kStackHighEnd + kPointerSize;
#endif
#ifdef _WIN64
diff --git a/deps/v8/src/x64/simulator-x64.h b/deps/v8/src/x64/simulator-x64.h
index c4f3a85af..015ba1315 100644
--- a/deps/v8/src/x64/simulator-x64.h
+++ b/deps/v8/src/x64/simulator-x64.h
@@ -53,9 +53,9 @@ class SimulatorStack : public v8::internal::AllStatic {
};
// Call the generated regexp code directly. The entry function pointer should
-// expect seven int/pointer sized arguments and return an int.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
- entry(p0, p1, p2, p3, p4, p5, p6)
+// expect eight int/pointer sized arguments and return an int.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
+ entry(p0, p1, p2, p3, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
reinterpret_cast<TryCatch*>(try_catch_address)
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 32bd3450f..cbddb61ee 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -1827,17 +1827,15 @@ Object* ConstructStubCompiler::CompileConstructStub(
// depending on the this.x = ...; assignment in the function.
for (int i = 0; i < shared->this_property_assignments_count(); i++) {
if (shared->IsThisPropertyAssignmentArgument(i)) {
- Label not_passed;
- // Set the property to undefined.
- __ movq(Operand(r9, i * kPointerSize), r8);
// Check if the argument assigned to the property is actually passed.
+      // If the argument is not passed the property is set to undefined;
+      // otherwise it is found on the stack.
int arg_number = shared->GetThisPropertyAssignmentArgument(i);
+ __ movq(rbx, r8);
__ cmpq(rax, Immediate(arg_number));
- __ j(below_equal, &not_passed);
- // Argument passed - find it on the stack.
- __ movq(rbx, Operand(rcx, arg_number * -kPointerSize));
+ __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
+ // Store value in the property.
__ movq(Operand(r9, i * kPointerSize), rbx);
- __ bind(&not_passed);
} else {
// Set the property to the constant value.
Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i));
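
The constructor-stub rewrite above trades a forward branch per property for a
conditional move, so the store always executes; the pattern in isolation
(register roles as in the hunk):

    __ movq(rbx, r8);                     // default: undefined
    __ cmpq(rax, Immediate(arg_number));  // rax holds the argument count
    __ cmovq(above, rbx, Operand(rcx, arg_number * -kPointerSize));
    __ movq(Operand(r9, i * kPointerSize), rbx);  // store either way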
diff --git a/deps/v8/src/x64/virtual-frame-x64.cc b/deps/v8/src/x64/virtual-frame-x64.cc
index fe65d34a0..6e84ed163 100644
--- a/deps/v8/src/x64/virtual-frame-x64.cc
+++ b/deps/v8/src/x64/virtual-frame-x64.cc
@@ -129,11 +129,29 @@ void VirtualFrame::AllocateStackSlots() {
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
- __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ if (count == 1) {
+ __ Push(undefined);
+ } else if (count < kLocalVarBound) {
+      // For fewer locals the unrolled loop is more compact.
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ for (int i = 0; i < count; i++) {
+ __ push(kScratchRegister);
+ }
+ } else {
+ // For more locals a loop in generated code is more compact.
+ Label alloc_locals_loop;
+ Result cnt = cgen()->allocator()->Allocate();
+ ASSERT(cnt.is_valid());
+ __ movq(cnt.reg(), Immediate(count));
+ __ movq(kScratchRegister, undefined, RelocInfo::EMBEDDED_OBJECT);
+ __ bind(&alloc_locals_loop);
+ __ push(kScratchRegister);
+ __ decl(cnt.reg());
+ __ j(not_zero, &alloc_locals_loop);
+ }
for (int i = 0; i < count; i++) {
elements_.Add(initial_value);
stack_pointer_++;
- __ push(kScratchRegister);
}
}
}
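
The three code shapes AllocateStackSlots now emits, by local count (a sketch;
kLocalVarBound is 7 per the header change below):

    // count == 1:                  Push(undefined)   -- a single immediate push
    // 1 < count < kLocalVarBound:  movq scratch, undefined
    //                              push scratch      -- unrolled count times
    // count >= kLocalVarBound:     movq cnt, count
    //                           L: push scratch; decl cnt; j(not_zero, L)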
diff --git a/deps/v8/src/x64/virtual-frame-x64.h b/deps/v8/src/x64/virtual-frame-x64.h
index e49230507..88cf2bca0 100644
--- a/deps/v8/src/x64/virtual-frame-x64.h
+++ b/deps/v8/src/x64/virtual-frame-x64.h
@@ -200,6 +200,9 @@ class VirtualFrame : public ZoneObject {
// shared return site. Emits code for spills.
void PrepareForReturn();
+  // Number of local variables at or above which we use a loop to allocate.
+ static const int kLocalVarBound = 7;
+
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 3e3c9578e..3db7c37f1 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -6742,6 +6742,27 @@ TEST(PreCompile) {
v8::ScriptData::PreCompile(script, i::StrLength(script));
CHECK_NE(sd->Length(), 0);
CHECK_NE(sd->Data(), NULL);
+ CHECK(!sd->HasError());
+ delete sd;
+}
+
+
+TEST(PreCompileWithError) {
+ v8::V8::Initialize();
+ const char *script = "function foo(a) { return 1 * * 2; }";
+ v8::ScriptData *sd =
+ v8::ScriptData::PreCompile(script, i::StrLength(script));
+ CHECK(sd->HasError());
+ delete sd;
+}
+
+
+TEST(Regress31661) {
+ v8::V8::Initialize();
+ const char *script = " The Definintive Guide";
+ v8::ScriptData *sd =
+ v8::ScriptData::PreCompile(script, i::StrLength(script));
+ CHECK(sd->HasError());
delete sd;
}
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index b8b3364ac..ba4eec26a 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -57,7 +57,7 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmIa320) {
InitializeVM();
v8::HandleScope scope;
- v8::internal::byte buffer[1024];
+ v8::internal::byte buffer[2048];
Assembler assm(buffer, sizeof buffer);
DummyStaticFunction(NULL); // just bloody use it (DELETE; debugging)
@@ -223,13 +223,16 @@ TEST(DisasmIa320) {
__ sub(Operand(ebx), Immediate(12));
__ sub(Operand(edx, ecx, times_4, 10000), Immediate(12));
+ __ subb(Operand(edx, ecx, times_4, 10000), 100);
+ __ subb(Operand(eax), 100);
+ __ subb(eax, Operand(edx, ecx, times_4, 10000));
__ xor_(ebx, 12345);
__ imul(edx, ecx, 12);
__ imul(edx, ecx, 1000);
-
+ __ rep_movs();
__ sub(edx, Operand(ebx, ecx, times_4, 10000));
__ sub(edx, Operand(ebx));
@@ -365,6 +368,12 @@ TEST(DisasmIa320) {
__ movdbl(xmm1, Operand(ebx, ecx, times_4, 10000));
__ movdbl(Operand(ebx, ecx, times_4, 10000), xmm1);
__ comisd(xmm0, xmm1);
+
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqa(Operand(ebx, ecx, times_4, 10000), xmm0);
+ __ movdqu(xmm0, Operand(ebx, ecx, times_4, 10000));
+ __ movdqu(Operand(ebx, ecx, times_4, 10000), xmm0);
}
// cmov.
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 6aa0730c0..c72c4d19f 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -58,6 +58,16 @@
using namespace v8::internal;
+static bool CheckParse(const char* input) {
+ V8::Initialize(NULL);
+ v8::HandleScope scope;
+ ZoneScope zone_scope(DELETE_ON_EXIT);
+ FlatStringReader reader(CStrVector(input));
+ RegExpCompileData result;
+ return v8::internal::ParseRegExp(&reader, false, &result);
+}
+
+
static SmartPointer<const char> Parse(const char* input) {
V8::Initialize(NULL);
v8::HandleScope scope;
@@ -106,7 +116,7 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
}
-
+#define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, *Parse(input))
#define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
#define CHECK_MIN_MAX(input, min, max) \
@@ -117,6 +127,9 @@ static MinMaxPair CheckMinMaxMatch(const char* input) {
TEST(Parser) {
V8::Initialize(NULL);
+
+ CHECK_PARSE_ERROR("?");
+
CHECK_PARSE_EQ("abc", "'abc'");
CHECK_PARSE_EQ("", "%");
CHECK_PARSE_EQ("abc|def", "(| 'abc' 'def')");
@@ -600,6 +613,34 @@ TEST(DispatchTableConstruction) {
}
}
+// Test of debug-only syntax.
+#ifdef DEBUG
+
+TEST(ParsePossessiveRepetition) {
+ bool old_flag_value = FLAG_regexp_possessive_quantifier;
+
+ // Enable possessive quantifier syntax.
+ FLAG_regexp_possessive_quantifier = true;
+
+ CHECK_PARSE_EQ("a*+", "(# 0 - p 'a')");
+ CHECK_PARSE_EQ("a++", "(# 1 - p 'a')");
+ CHECK_PARSE_EQ("a?+", "(# 0 1 p 'a')");
+ CHECK_PARSE_EQ("a{10,20}+", "(# 10 20 p 'a')");
+ CHECK_PARSE_EQ("za{10,20}+b", "(: 'z' (# 10 20 p 'a') 'b')");
+
+ // Disable possessive quantifier syntax.
+ FLAG_regexp_possessive_quantifier = false;
+
+ CHECK_PARSE_ERROR("a*+");
+ CHECK_PARSE_ERROR("a++");
+ CHECK_PARSE_ERROR("a?+");
+ CHECK_PARSE_ERROR("a{10,20}+");
+ CHECK_PARSE_ERROR("a{10,20}+b");
+
+ FLAG_regexp_possessive_quantifier = old_flag_value;
+}
+
+#endif
// Tests of interpreter.
@@ -1550,7 +1591,68 @@ TEST(CharClassDifference) {
}
+TEST(CanonicalizeCharacterSets) {
+ ZoneScope scope(DELETE_ON_EXIT);
+ ZoneList<CharacterRange>* list = new ZoneList<CharacterRange>(4);
+ CharacterSet set(list);
+
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(30, 40));
+ list->Add(CharacterRange(50, 60));
+ set.Canonicalize();
+ ASSERT_EQ(3, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(20, list->at(0).to());
+ ASSERT_EQ(30, list->at(1).from());
+ ASSERT_EQ(40, list->at(1).to());
+ ASSERT_EQ(50, list->at(2).from());
+ ASSERT_EQ(60, list->at(2).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(50, 60));
+ list->Add(CharacterRange(30, 40));
+ set.Canonicalize();
+ ASSERT_EQ(3, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(20, list->at(0).to());
+ ASSERT_EQ(30, list->at(1).from());
+ ASSERT_EQ(40, list->at(1).to());
+ ASSERT_EQ(50, list->at(2).from());
+ ASSERT_EQ(60, list->at(2).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(30, 40));
+ list->Add(CharacterRange(10, 20));
+ list->Add(CharacterRange(25, 25));
+ list->Add(CharacterRange(100, 100));
+ list->Add(CharacterRange(1, 1));
+ set.Canonicalize();
+ ASSERT_EQ(5, list->length());
+ ASSERT_EQ(1, list->at(0).from());
+ ASSERT_EQ(1, list->at(0).to());
+ ASSERT_EQ(10, list->at(1).from());
+ ASSERT_EQ(20, list->at(1).to());
+ ASSERT_EQ(25, list->at(2).from());
+ ASSERT_EQ(25, list->at(2).to());
+ ASSERT_EQ(30, list->at(3).from());
+ ASSERT_EQ(40, list->at(3).to());
+ ASSERT_EQ(100, list->at(4).from());
+ ASSERT_EQ(100, list->at(4).to());
+
+ list->Rewind(0);
+ list->Add(CharacterRange(10, 19));
+ list->Add(CharacterRange(21, 30));
+ list->Add(CharacterRange(20, 20));
+ set.Canonicalize();
+ ASSERT_EQ(1, list->length());
+ ASSERT_EQ(10, list->at(0).from());
+ ASSERT_EQ(30, list->at(0).to());
+}
+
+
+
TEST(Graph) {
V8::Initialize(NULL);
- Execute("(?:(?:x(.))?\1)+$", false, true, true);
+ Execute("\\b\\w+\\b", false, true, true);
}
diff --git a/deps/v8/test/cctest/test-serialize.cc b/deps/v8/test/cctest/test-serialize.cc
index 8f4441ac7..d02972b83 100644
--- a/deps/v8/test/cctest/test-serialize.cc
+++ b/deps/v8/test/cctest/test-serialize.cc
@@ -37,6 +37,8 @@
#include "scopeinfo.h"
#include "snapshot.h"
#include "cctest.h"
+#include "spaces.h"
+#include "objects.h"
using namespace v8::internal;
@@ -277,6 +279,149 @@ DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
}
+class FileByteSink : public SnapshotByteSink {
+ public:
+ explicit FileByteSink(const char* snapshot_file) {
+ fp_ = OS::FOpen(snapshot_file, "wb");
+ if (fp_ == NULL) {
+ PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
+ exit(1);
+ }
+ }
+ virtual ~FileByteSink() {
+ if (fp_ != NULL) {
+ fclose(fp_);
+ }
+ }
+ virtual void Put(int byte, const char* description) {
+ if (fp_ != NULL) {
+ fputc(byte, fp_);
+ }
+ }
+
+ private:
+ FILE* fp_;
+};
+
+
+TEST(PartialSerialization) {
+ Serializer::Enable();
+ v8::V8::Initialize();
+ v8::Persistent<v8::Context> env = v8::Context::New();
+ env->Enter();
+
+ v8::HandleScope handle_scope;
+ v8::Local<v8::String> foo = v8::String::New("foo");
+
+ FileByteSink file(FLAG_testing_serialization_file);
+ Serializer ser(&file);
+ i::Handle<i::String> internal_foo = v8::Utils::OpenHandle(*foo);
+ Object* raw_foo = *internal_foo;
+ ser.SerializePartial(&raw_foo);
+}
+
+
+TEST(LinearAllocation) {
+ v8::V8::Initialize();
+ int new_space_max = 512 * KB;
+ for (int size = 1000; size < 5 * MB; size += size >> 1) {
+ int new_space_size = (size < new_space_max) ? size : new_space_max;
+ Heap::ReserveSpace(
+ new_space_size,
+ size, // Old pointer space.
+ size, // Old data space.
+ size, // Code space.
+ size, // Map space.
+ size, // Cell space.
+ size); // Large object space.
+ LinearAllocationScope linear_allocation_scope;
+ const int kSmallFixedArrayLength = 4;
+ const int kSmallFixedArraySize =
+ FixedArray::kHeaderSize + kSmallFixedArrayLength * kPointerSize;
+ const int kSmallStringLength = 16;
+ const int kSmallStringSize =
+ SeqAsciiString::kHeaderSize + kSmallStringLength;
+ const int kMapSize = Map::kSize;
+
+ Object* new_last = NULL;
+ for (int i = 0;
+ i + kSmallFixedArraySize <= new_space_size;
+ i += kSmallFixedArraySize) {
+ Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength);
+ if (new_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(new_last) + kSmallFixedArraySize);
+ }
+ new_last = obj;
+ }
+
+ Object* pointer_last = NULL;
+ for (int i = 0;
+ i + kSmallFixedArraySize <= size;
+ i += kSmallFixedArraySize) {
+ Object* obj = Heap::AllocateFixedArray(kSmallFixedArrayLength, TENURED);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kSmallFixedArraySize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ pointer_last = NULL;
+ }
+ if (pointer_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(pointer_last) + kSmallFixedArraySize);
+ }
+ pointer_last = obj;
+ }
+
+ Object* data_last = NULL;
+ for (int i = 0; i + kSmallStringSize <= size; i += kSmallStringSize) {
+ Object* obj = Heap::AllocateRawAsciiString(kSmallStringLength, TENURED);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kSmallStringSize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ data_last = NULL;
+ }
+ if (data_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(data_last) + kSmallStringSize);
+ }
+ data_last = obj;
+ }
+
+ Object* map_last = NULL;
+ for (int i = 0; i + kMapSize <= size; i += kMapSize) {
+ Object* obj = Heap::AllocateMap(JS_OBJECT_TYPE, 42 * kPointerSize);
+ int old_page_fullness = i % Page::kPageSize;
+ int page_fullness = (i + kMapSize) % Page::kPageSize;
+ if (page_fullness < old_page_fullness ||
+ page_fullness > Page::kObjectAreaSize) {
+ i = RoundUp(i, Page::kPageSize);
+ map_last = NULL;
+ }
+ if (map_last != NULL) {
+ CHECK_EQ(reinterpret_cast<char*>(obj),
+ reinterpret_cast<char*>(map_last) + kMapSize);
+ }
+ map_last = obj;
+ }
+
+ if (size > Page::kObjectAreaSize) {
+ // Support for reserving space in large object space is not there yet,
+ // but using an always-allocate scope is fine for now.
+ AlwaysAllocateScope always;
+ int large_object_array_length =
+ (size - FixedArray::kHeaderSize) / kPointerSize;
+ Object* obj = Heap::AllocateFixedArray(large_object_array_length,
+ TENURED);
+ CHECK(!obj->IsFailure());
+ }
+ }
+}
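
The page-crossing test repeated in the loops above, factored out (a sketch under
the assumption that Page::kPageSize and Page::kObjectAreaSize behave as in the
V8 heap headers):

    static bool CrossesPage(int offset, int object_size) {
      int old_fullness = offset % Page::kPageSize;
      int new_fullness = (offset + object_size) % Page::kPageSize;
      // A wrapping modulus means a page boundary was crossed; exceeding the
      // object area means the object would spill past the usable region.
      return new_fullness < old_fullness || new_fullness > Page::kObjectAreaSize;
    }

When it triggers, the test realigns the offset to the next page and resets the
expected-adjacency pointer.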
+
+
TEST(TestThatAlwaysSucceeds) {
}
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index 49cffb280..3fc1e0ad6 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -38,8 +38,6 @@ chapter13: UNIMPLEMENTED
chapter14: UNIMPLEMENTED
chapter15/15.1: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.1: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.2: UNIMPLEMENTED
-chapter15/15.2/15.2.3/15.2.3.3: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.4: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.5: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.6: UNIMPLEMENTED
@@ -51,6 +49,99 @@ chapter15/15.2/15.2.3/15.2.3.11: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.12: UNIMPLEMENTED
chapter15/15.2/15.2.3/15.2.3.13: UNIMPLEMENTED
+# Object.getPrototypeOf
+chapter15/15.2/15.2.3/15.2.3.2: PASS
+
+# Object.getOwnPropertyDescriptor
+chapter15/15.2/15.2.3/15.2.3.3: PASS
+
+# NOT IMPLEMENTED: defineProperty
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-3: FAIL_OK
+
+# NOT IMPLEMENTED: getOwnPropertyNames
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-16: FAIL_OK
+
+# NOT IMPLEMENTED: defineProperty
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-18: FAIL_OK
+
+# NOT IMPLEMENTED: defineProperties
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-19: FAIL_OK
+
+# NOT IMPLEMENTED: seal
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-20: FAIL_OK
+
+# NOT IMPLEMENTED: freeze
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-21: FAIL_OK
+
+# NOT IMPLEMENTED: preventExtensions
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-22: FAIL_OK
+
+# NOT IMPLEMENTED: isSealed
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-23: FAIL_OK
+
+# NOT IMPLEMENTED: isFrozen
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-24: FAIL_OK
+
+# NOT IMPLEMENTED: isExtensible
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-25: FAIL_OK
+
+# NOT IMPLEMENTED: bind
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-38: FAIL_OK
+
+# Built-ins have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-178: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-179: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-180: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-182: FAIL_OK
+
+# Our Function object has a "arguments" property which is used as a non
+# property in in the test
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-183: FAIL_OK
+
+
+# Our Function object has a "caller" property which is used as a non
+# property in in the test
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-184: FAIL_OK
+
+# Built-ins have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-185: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-186: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-187: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-188: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-189: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-190: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-191: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-192: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-193: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-194: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-195: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-201: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-210: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-211: FAIL_OK
+
+
+# NOT IMPLEMENTED: RegExp.prototype.source
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-212: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.global
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-213: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.ignoreCase
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-214: FAIL_OK
+
+# NOT IMPLEMENTED: RegExp.prototype.multiline
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-215: FAIL_OK
+
+# Errors have wrong descriptor (should all be false)
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-216: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-217: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-218: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-219: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-220: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-221: FAIL_OK
+chapter15/15.2/15.2.3/15.2.3.3/15.2.3.3-4-222: FAIL_OK
+
+
# Object.keys
chapter15/15.2/15.2.3/15.2.3.14: PASS
@@ -59,7 +150,74 @@ chapter15/15.2/15.2.3/15.2.3.14: PASS
chapter15/15.2/15.2.3/15.2.3.14/15.2.3.14-3-3: FAIL_OK
chapter15/15.3: UNIMPLEMENTED
-chapter15/15.4: UNIMPLEMENTED
+
+chapter15/15.4/15.4.4/15.4.4.14: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.15: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.20: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.21: UNIMPLEMENTED
+chapter15/15.4/15.4.4/15.4.4.22: UNIMPLEMENTED
+
+# Array.prototype.every
+chapter15/15.4/15.4.4/15.4.4.16: PASS
+
+# Wrong test - because this is not given as an argument to arr.every,
+# this._15_4_4_16_5_1 evaluates to undefined
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-5-1: FAIL_OK
+
+# In the test case the element is not appended - it is added in the middle
+# of the array
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-7-1: FAIL_OK
+
+# We fail because the test assumes that if the reference to the array is
+# deleted it is no longer traversed
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-7-7: FAIL_OK
+
+# if (val>1) in the test should be if (val>2)
+chapter15/15.4/15.4.4/15.4.4.16/15.4.4.16-8-10: FAIL_OK
+
+
+# Array.prototype.some
+chapter15/15.4/15.4.4/15.4.4.17: PASS
+
+# Wrong assumption - according to the spec, some returns a Boolean, not a number
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-4-9: FAIL_OK
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-1
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-7-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-7-7: FAIL_OK
+
+# Same as 15.4.4.16-8-10
+chapter15/15.4/15.4.4/15.4.4.17/15.4.4.17-8-10: FAIL_OK
+
+
+# Array.prototype.forEach
+chapter15/15.4/15.4.4/15.4.4.18: PASS
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.18/15.4.4.18-7-6: FAIL_OK
+
+
+# Array.prototype.map
+chapter15/15.4/15.4.4/15.4.4.19: PASS
+
+# Same as 15.4.4.16-5-1
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-5-1: FAIL_OK
+
+# Same as 15.4.4.16-7-7
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-7: FAIL_OK
+
+# Uses an array index number as a property
+chapter15/15.4/15.4.4/15.4.4.19/15.4.4.19-8-c-iii-1: FAIL_OK
+
+
chapter15/15.5: UNIMPLEMENTED
chapter15/15.6: UNIMPLEMENTED
chapter15/15.7: UNIMPLEMENTED
diff --git a/deps/v8/test/mjsunit/bit-not.js b/deps/v8/test/mjsunit/bit-not.js
new file mode 100644
index 000000000..85eccc4a2
--- /dev/null
+++ b/deps/v8/test/mjsunit/bit-not.js
@@ -0,0 +1,75 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function testBitNot(x, name) {
+ // The VM constant folds so we use that to check the result.
+ var expected = eval("~(" + x + ")");
+ var actual = ~x;
+ assertEquals(expected, actual, "x: " + name);
+
+ // Test the path where we can overwrite the result. Use -
+ // to avoid concatenating strings.
+ expected = eval("~(" + x + " - 0.01)");
+ actual = ~(x - 0.01);
+ assertEquals(expected, actual, "x - 0.01: " + name);
+}
+
+
+testBitNot(0, 0);
+testBitNot(1, 1);
+testBitNot(-1, 1);
+testBitNot(100, 100);
+testBitNot(0x40000000, "0x40000000");
+testBitNot(0x7fffffff, "0x7fffffff");
+testBitNot(0x80000000, "0x80000000");
+
+testBitNot(2.2, 2.2);
+testBitNot(-2.3, -2.3);
+testBitNot(Infinity, "Infinity");
+testBitNot(NaN, "NaN");
+testBitNot(-Infinity, "-Infinity");
+testBitNot(0x40000000 + 0.12345, "float1");
+testBitNot(0x40000000 - 0.12345, "float2");
+testBitNot(0x7fffffff + 0.12345, "float3");
+testBitNot(0x7fffffff - 0.12345, "float4");
+testBitNot(0x80000000 + 0.12345, "float5");
+testBitNot(0x80000000 - 0.12345, "float6");
+
+testBitNot("0", "string0");
+testBitNot("2.3", "string2.3");
+testBitNot("-9.4", "string-9.4");
+
+
+// Try to test that we can deal with allocation failures in
+// the fast path and just use the slow path instead.
+function TryToGC() {
+ var x = 0x40000000;
+ for (var i = 0; i < 1000000; i++) {
+ assertEquals(~0x40000000, ~x);
+ }
+}
+TryToGC();
diff --git a/deps/v8/test/mjsunit/bitwise-operations-undefined.js b/deps/v8/test/mjsunit/bitwise-operations-undefined.js
new file mode 100644
index 000000000..716e52dfe
--- /dev/null
+++ b/deps/v8/test/mjsunit/bitwise-operations-undefined.js
@@ -0,0 +1,49 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test bitwise operations with undefined.
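+// ToInt32 and ToUint32 map undefined to 0, so undefined behaves as 0 as
+// an operand and shifts by zero bits when used as a shift count.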
+
+function testUndefinedLeftHandSide() {
+ assertEquals(undefined | 1, 1);
+ assertEquals(undefined & 1, 0);
+ assertEquals(undefined ^ 1, 1);
+ assertEquals(undefined << 1, 0);
+ assertEquals(undefined >> 1, 0);
+ assertEquals(undefined >>> 1, 0);
+}
+
+function testUndefinedRightHandSide() {
+ assertEquals(1 | undefined, 1);
+ assertEquals(1 & undefined, 0);
+ assertEquals(1 ^ undefined, 1);
+ assertEquals(1 << undefined, 1);
+ assertEquals(1 >> undefined, 1);
+ assertEquals(1 >>> undefined, 1);
+}
+
+testUndefinedLeftHandSide();
+testUndefinedRightHandSide();
diff --git a/deps/v8/test/mjsunit/compare-character.js b/deps/v8/test/mjsunit/compare-character.js
new file mode 100644
index 000000000..cabe0137b
--- /dev/null
+++ b/deps/v8/test/mjsunit/compare-character.js
@@ -0,0 +1,50 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test the optimized implementation of comparison with single-character
+// strings.
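+// Each value is compared both via the variable f and via the literal
+// 'f'; the specialized single-character literal path must agree with the
+// generic path.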
+
+var a = ['', String.fromCharCode(0), ' ', 'e', 'erik', 'f', 'foo', 'g', 'goo',
+ -1, 0, 1, 1.2, -7.9, true, false, 'foo', '0', 'NaN' ];
+for (var i in a) {
+ var x = a[i];
+ var f = 'f';
+
+ assertEquals(x == f, x == 'f', "==" + x);
+ assertEquals(x === f, x === 'f', "===" + x);
+ assertEquals(x < f, x < 'f', "<" + x);
+ assertEquals(x <= f, x <= 'f', "<=" + x);
+ assertEquals(x > f, x > 'f', ">" + x);
+ assertEquals(x >= f, x >= 'f', ">=" + x);
+ assertEquals(f == x, 'f' == x, "==r" + x);
+ assertEquals(f === x, 'f' === x, "===r" + x);
+ assertEquals(f > x, 'f' > x, "<r" + x);
+ assertEquals(f >= x, 'f' >= x, "<=r" + x);
+ assertEquals(f < x, 'f' < x, ">r" + x);
+ assertEquals(f <= x, 'f' <= x, ">=r" + x);
+}
+
diff --git a/deps/v8/test/mjsunit/compare-nan.js b/deps/v8/test/mjsunit/compare-nan.js
index fc40acc04..c4f7817ff 100644
--- a/deps/v8/test/mjsunit/compare-nan.js
+++ b/deps/v8/test/mjsunit/compare-nan.js
@@ -42,3 +42,25 @@ for (var i in a) {
assertFalse(x <= NaN, "" + x + " <= NaN");
assertFalse(x >= NaN, "" + x + " >= NaN");
}
+
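+// Every comparison involving NaN is false, including NaN == NaN and
+// NaN === NaN. The assertions are built with eval so each value appears
+// as a literal on both sides of the operator.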
+var b = ["NaN", "-1", "0", "1", "1.2", "-7.9", "true", "false", "'foo'", "'0'",
+ "'NaN'" ];
+for (var i in b) {
+ var x = b[i];
+ var program =
+ "assertFalse(NaN == " + x + ", 'NaN == ' + " + x + ");\n" +
+ "assertFalse(NaN === " + x + ", 'NaN === ' + " + x + ");\n" +
+ "assertFalse(NaN < " + x + ", 'NaN < ' + " + x + ");\n" +
+ "assertFalse(NaN > " + x + ", 'NaN > ' + " + x + ");\n" +
+ "assertFalse(NaN <= " + x + ", 'NaN <= ' + " + x + ");\n" +
+ "assertFalse(NaN >= " + x + ", 'NaN >= ' + " + x + ");\n" +
+
+ "assertFalse(" + x + " == NaN, '' + " + x + " + ' == NaN');\n" +
+ "assertFalse(" + x + " === NaN, '' + " + x + " + ' === NaN');\n" +
+ "assertFalse(" + x + " < NaN, '' + " + x + " + ' < NaN');\n" +
+ "assertFalse(" + x + " > NaN, '' + " + x + " + ' > NaN');\n" +
+ "assertFalse(" + x + " <= NaN, '' + " + x + " + ' <= NaN');\n" +
+ "assertFalse(" + x + " >= NaN, '' + " + x + " + ' >= NaN');\n";
+ eval(program);
+}
+
diff --git a/deps/v8/test/mjsunit/compiler/countoperation.js b/deps/v8/test/mjsunit/compiler/countoperation.js
new file mode 100644
index 000000000..5660cee16
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/countoperation.js
@@ -0,0 +1,111 @@
+// Copyright 2009 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test pre- and postfix count operations.
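+// A prefix operation (++x) evaluates to the new value, a postfix one
+// (x++) to the old value; both write the incremented value back.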
+
+// Test value context.
+var a = 42;
+var b = {x:42};
+var c = "x";
+assertEquals(43, ++a);
+assertEquals(43, a);
+assertEquals(43, a++);
+assertEquals(44, a);
+assertEquals(43, ++b.x);
+assertEquals(43, b.x);
+assertEquals(43, b.x++);
+assertEquals(44, b.x);
+assertEquals(45, ++b[c]);
+assertEquals(45, b[c]);
+assertEquals(45, b[c]++);
+assertEquals(46, b[c]);
+
+// Test effect context.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, eval("++a; 1"));
+assertEquals(43, a);
+assertEquals(1, eval("a++; 1"));
+assertEquals(44, a);
+assertEquals(1, eval("++b.x; 1"));
+assertEquals(43, b.x);
+assertEquals(1, eval("b.x++; 1"));
+assertEquals(44, b.x);
+assertEquals(1, eval("++b[c]; 1"));
+assertEquals(45, b[c]);
+assertEquals(1, eval("b[c]++; 1"));
+assertEquals(46, b[c]);
+
+// Test test context.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, (++a) ? 1 : 0);
+assertEquals(43, a);
+assertEquals(1, (a++) ? 1 : 0);
+assertEquals(44, a);
+assertEquals(1, (++b.x) ? 1 : 0);
+assertEquals(43, b.x);
+assertEquals(1, (b.x++) ? 1 : 0);
+assertEquals(44, b.x);
+assertEquals(1, (++b[c]) ? 1 : 0);
+assertEquals(45, b[c]);
+assertEquals(1, (b[c]++) ? 1 : 0);
+assertEquals(46, b[c]);
+
+// Test value/test and test/value contexts.
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(43, ++a || 1);
+assertEquals(43, a);
+assertEquals(43, a++ || 1);
+assertEquals(44, a);
+assertEquals(43, ++b.x || 1);
+assertEquals(43, b.x);
+assertEquals(43, (b.x++) || 1);
+assertEquals(44, b.x);
+assertEquals(45, ++b[c] || 1);
+assertEquals(45, b[c]);
+assertEquals(45, b[c]++ || 1);
+assertEquals(46, b[c]);
+a = 42;
+b = {x:42};
+c = "x";
+assertEquals(1, ++a && 1);
+assertEquals(43, a);
+assertEquals(1, a++ && 1);
+assertEquals(44, a);
+assertEquals(1, ++b.x && 1);
+assertEquals(43, b.x);
+assertEquals(1, (b.x++) && 1);
+assertEquals(44, b.x);
+assertEquals(1, ++b[c] && 1);
+assertEquals(45, b[c]);
+assertEquals(1, b[c]++ && 1);
+assertEquals(46, b[c]);
diff --git a/deps/v8/test/mjsunit/eval.js b/deps/v8/test/mjsunit/eval.js
index 08bd3d036..95357c73d 100644
--- a/deps/v8/test/mjsunit/eval.js
+++ b/deps/v8/test/mjsunit/eval.js
@@ -58,16 +58,16 @@ eval = global_eval;
// Test that un-aliased eval reads from local context.
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return eval('foo');
})();
assertEquals(2, result);
-//Test that un-aliased eval writes to local context.
+// Test that un-aliased eval writes to local context.
foo = 0;
-result =
+result =
(function() {
var foo = 1;
eval('foo = 2');
@@ -84,7 +84,7 @@ assertTrue(o === o.self);
// Test that aliased eval reads from global context.
var e = eval;
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return e('foo');
@@ -105,7 +105,7 @@ assertTrue(this === o.self);
// Try to cheat the 'aliased eval' detection.
var x = this;
foo = 0;
-result =
+result =
(function() {
var foo = 2;
return x.eval('foo');
@@ -113,7 +113,7 @@ result =
assertEquals(0, result);
foo = 0;
-result =
+result =
(function() {
var eval = function(x) { return x; };
var foo = eval(2);
@@ -128,8 +128,29 @@ result =
})();
assertEquals(4, result);
+result =
+ (function() {
+ eval("var eval = function(s) { return this; }");
+ return eval("42"); // Should return the global object
+ })();
+assertEquals(this, result);
+
+result =
+ (function() {
+ var obj = { f: function(eval) { return eval("this"); } };
+ return obj.f(eval);
+ })();
+assertEquals(this, result);
+
+result =
+ (function() {
+ var obj = { f: function(eval) { arguments; return eval("this"); } };
+ return obj.f(eval);
+ })();
+assertEquals(this, result);
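+// In each case above the call does not go through the global eval
+// binding, so the result is the global object: the redefined eval is a
+// plain function call, and the passed-in eval is treated as indirect.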
+
eval = function(x) { return 2 * x; };
-result =
+result =
(function() {
return (function() { return eval(2); })();
})();
diff --git a/deps/v8/test/mjsunit/fuzz-natives.js b/deps/v8/test/mjsunit/fuzz-natives.js
index cd5066767..d906eb8a4 100644
--- a/deps/v8/test/mjsunit/fuzz-natives.js
+++ b/deps/v8/test/mjsunit/fuzz-natives.js
@@ -95,7 +95,11 @@ function testArgumentTypes(name, argc) {
var knownProblems = {
"Abort": true,
-
+
+ // Avoid calling the concat operation, because weird lengths
+ // may lead to out-of-memory.
+ "StringBuilderConcat": true,
+
// These functions use pseudo-stack-pointers and are not robust
// to unexpected integer values.
"DebugEvaluate": true,
@@ -114,7 +118,7 @@ var knownProblems = {
// the rest of the tests.
"DisableAccessChecks": true,
"EnableAccessChecks": true,
-
+
// These functions should not be callable as runtime functions.
"NewContext": true,
"NewArgumentsFast": true,
diff --git a/deps/v8/test/mjsunit/get-own-property-descriptor.js b/deps/v8/test/mjsunit/get-own-property-descriptor.js
new file mode 100644
index 000000000..79172c863
--- /dev/null
+++ b/deps/v8/test/mjsunit/get-own-property-descriptor.js
@@ -0,0 +1,51 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function get() { return x; }
+function set(x) { this.x = x; }
+
+var obj = {x:1};
+obj.__defineGetter__("accessor", get);
+obj.__defineSetter__("accessor", set);
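+// Per ES5 8.10, a data property's descriptor carries value and writable,
+// an accessor's carries get and set, and both carry enumerable and
+// configurable.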
+
+
+var descIsData = Object.getOwnPropertyDescriptor(obj,'x');
+assertTrue(descIsData.enumerable);
+assertTrue(descIsData.writable);
+assertTrue(descIsData.configurable);
+
+var descIsAccessor = Object.getOwnPropertyDescriptor(obj, 'accessor');
+assertTrue(descIsAccessor.enumerable);
+assertTrue(descIsAccessor.configurable);
+assertTrue(descIsAccessor.get == get);
+assertTrue(descIsAccessor.set == set);
+
+var descIsNotData = Object.getOwnPropertyDescriptor(obj, 'not-x');
+assertEquals(undefined, descIsNotData);
+
+var descIsNotAccessor = Object.getOwnPropertyDescriptor(obj, 'not-accessor');
+assertEquals(undefined, descIsNotAccessor);
diff --git a/deps/v8/test/mjsunit/get-prototype-of.js b/deps/v8/test/mjsunit/get-prototype-of.js
new file mode 100644
index 000000000..6475bde65
--- /dev/null
+++ b/deps/v8/test/mjsunit/get-prototype-of.js
@@ -0,0 +1,68 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
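+// In ES5, Object.getPrototypeOf throws a TypeError when its argument is
+// not an object; primitives are not coerced.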
+function TryGetPrototypeOfNonObject(x) {
+ var caught = 0;
+ try {
+ Object.getPrototypeOf(x);
+ } catch (e) {
+ caught = e;
+ }
+
+ assertTrue(caught instanceof TypeError);
+}
+
+function GetPrototypeOfObject(x) {
+  // An exception from Object.getPrototypeOf would propagate and fail the
+  // test, so reaching the asserts shows the call does not throw.
+  var proto = Object.getPrototypeOf(x);
+  assertNotNull(proto);
+  assertEquals(proto, x.__proto__);
+}
+
+function F() {}
+
+// Non-object.
+var x = 10;
+
+// Object
+var y = new F();
+
+// Make sure that a TypeError is thrown when a non-object is passed as
+// the argument.
+TryGetPrototypeOfNonObject(0);
+TryGetPrototypeOfNonObject(null);
+TryGetPrototypeOfNonObject('Testing');
+TryGetPrototypeOfNonObject(x);
+
+// Make sure the real objects have this method and that it returns the
+// actual prototype object. Also test for Functions and RegExp.
+GetPrototypeOfObject(this);
+GetPrototypeOfObject(y);
+GetPrototypeOfObject({x:5});
+GetPrototypeOfObject(F);
+GetPrototypeOfObject(RegExp);
+
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index bf44f7814..35e16340c 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -65,9 +65,9 @@ assertEquals("tolf", (new String("tolf")).toJSON());
GenericToJSONChecks(String, "x", "y");
// Date toJSON
-assertEquals("1970-01-01T00:00:00Z", new Date(0).toJSON());
-assertEquals("1979-01-11T08:00:00Z", new Date("1979-01-11 08:00 GMT").toJSON());
-assertEquals("2005-05-05T05:05:05Z", new Date("2005-05-05 05:05:05 GMT").toJSON());
+assertEquals("1970-01-01T00:00:00.000Z", new Date(0).toJSON());
+assertEquals("1979-01-11T08:00:00.000Z", new Date("1979-01-11 08:00 GMT").toJSON());
+assertEquals("2005-05-05T05:05:05.000Z", new Date("2005-05-05 05:05:05 GMT").toJSON());
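+// Date.prototype.toJSON delegates to toISOString, whose ES5 format
+// always includes a three-digit milliseconds field.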
var n1 = new Date(10000);
n1.toISOString = function () { return "foo"; };
assertEquals("foo", n1.toJSON());
diff --git a/deps/v8/test/mjsunit/mirror-date.js b/deps/v8/test/mjsunit/mirror-date.js
index 6b6a3ad40..5c113de26 100644
--- a/deps/v8/test/mjsunit/mirror-date.js
+++ b/deps/v8/test/mjsunit/mirror-date.js
@@ -57,7 +57,7 @@ function testDateMirror(d, iso8601) {
// Test Date values.
testDateMirror(new Date(Date.parse("Dec 25, 1995 1:30 UTC")),
- "1995-12-25T01:30:00Z");
+ "1995-12-25T01:30:00.000Z");
d = new Date();
d.setUTCFullYear(1967);
d.setUTCMonth(0); // January.
@@ -66,10 +66,12 @@ d.setUTCHours(9);
d.setUTCMinutes(22);
d.setUTCSeconds(59);
d.setUTCMilliseconds(0);
-testDateMirror(d, "1967-01-17T09:22:59Z");
+testDateMirror(d, "1967-01-17T09:22:59.000Z");
d.setUTCMilliseconds(1);
-testDateMirror(d, "1967-01-17T09:22:59Z");
+testDateMirror(d, "1967-01-17T09:22:59.001Z");
d.setUTCSeconds(12);
-testDateMirror(d, "1967-01-17T09:22:12Z");
+testDateMirror(d, "1967-01-17T09:22:12.001Z");
d.setUTCSeconds(36);
-testDateMirror(d, "1967-01-17T09:22:36Z");
+testDateMirror(d, "1967-01-17T09:22:36.001Z");
+d.setUTCMilliseconds(136);
+testDateMirror(d, "1967-01-17T09:22:36.136Z");
diff --git a/deps/v8/test/mjsunit/smi-ops.js b/deps/v8/test/mjsunit/smi-ops.js
index 284050d36..39b489450 100644
--- a/deps/v8/test/mjsunit/smi-ops.js
+++ b/deps/v8/test/mjsunit/smi-ops.js
@@ -537,7 +537,7 @@ function testShiftNonSmis() {
one = four - three;
zero = one - one;
- // Begin block A repeat 3
+ // Begin block A repeat 3
assertEquals(pos_non_smi, (pos_non_smi) >> zero);
assertEquals(pos_non_smi, (pos_non_smi) >>> zero);
assertEquals(pos_non_smi, (pos_non_smi) << zero);
@@ -638,6 +638,31 @@ function testShiftNonSmis() {
testShiftNonSmis();
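+// x | 0 applies ToInt32, which truncates toward zero, so multiplying by
+// a factor just below 1 lands on the next integer toward zero.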
+function intConversion() {
+ function foo(x) {
+ assertEquals(x, (x * 1.0000000001) | 0, "foo more " + x);
+ assertEquals(x, x | 0, "foo " + x);
+ if (x > 0) {
+ assertEquals(x - 1, (x * 0.9999999999) | 0, "foo less " + x);
+ } else {
+ assertEquals(x + 1, (x * 0.9999999999) | 0, "foo less " + x);
+ }
+ }
+ for (var i = 1; i < 0x80000000; i *= 2) {
+ foo(i);
+ foo(-i);
+ }
+ for (var i = 1; i < 1/0; i *= 2) {
+ assertEquals(i | 0, (i * 1.0000000000000001) | 0, "b" + i);
+ assertEquals(-i | 0, (i * -1.0000000000000001) | 0, "c" + i);
+ }
+ for (var i = 0.5; i > 0; i /= 2) {
+ assertEquals(0, i | 0, "d" + i);
+ assertEquals(0, -i | 0, "e" + i);
+ }
+}
+
+intConversion();
// Verify that we handle the (optimized) corner case of shifting by
// zero even for non-smis.
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index ba7224b4a..4368eb81b 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -199,9 +199,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing the
- # code would be better
- ['target_arch=="arm" and _toolset=="host"', {
+ ['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
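+# host_arch=="x64" limits the -m32 workaround to 64-bit hosts; an ia32
+# host already produces 32-bit binaries.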
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
@@ -428,9 +426,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
- # the code would be better
- ['_toolset=="host"', {
+ ['host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
@@ -598,9 +594,7 @@
'conditions': [
# The ARM assembler assumes the host is 32 bits, so force building
# 32-bit host tools.
- # TODO(piman): This assumes that the host is ia32 or amd64. Fixing
- # the code would be better
- ['target_arch=="arm" and _toolset=="host"', {
+ ['target_arch=="arm" and host_arch=="x64" and _toolset=="host"', {
'cflags': ['-m32'],
'ldflags': ['-m32'],
}]
diff --git a/deps/v8/tools/jsmin.py b/deps/v8/tools/jsmin.py
index fd1abe48f..646bf143a 100644
--- a/deps/v8/tools/jsmin.py
+++ b/deps/v8/tools/jsmin.py
@@ -230,7 +230,9 @@ class JavaScriptMinifier(object):
# A regexp that matches a literal string surrounded by 'double quotes'.
single_quoted_string = r"'(?:[^'\\]|\\.)*'"
# A regexp that matches a regexp literal surrounded by /slashes/.
- slash_quoted_regexp = r"/(?:[^/\\]|\\.)+/"
+ # Don't allow a regexp to have a ) before the first ( since that's a
+ # syntax error and it's probably just two unrelated slashes.
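+ # For example, in "(a/b) + (c/d)" the text between the slashes is
+ # "b) + (c"; its ')' comes before its '(', so it is no longer taken as
+ # a regexp literal.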
+ slash_quoted_regexp = r"/(?:(?=\()|(?:[^()/\\]|\\.)+)(?:\([^/\\]|\\.)*/"
# Replace multiple spaces with a single space.
line = re.sub("|".join([double_quoted_string,
single_quoted_string,
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index 3f27c001a..04952e0b4 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -221,7 +221,7 @@ class CppLintProcessor(SourceFileProcessor):
COPYRIGHT_HEADER_PATTERN = re.compile(
- r'Copyright [\d-]*200[8-9] the V8 project authors. All rights reserved.')
+ r'Copyright [\d-]*20[0-1][0-9] the V8 project authors. All rights reserved.')
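+# The pattern accepts any year from 2000 through 2019, optionally preceded
+# by a range such as "2006-".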
class SourceProcessor(SourceFileProcessor):
"""