author    Ryan Dahl <ry@tinyclouds.org>  2011-12-14 15:02:32 -0800
committer Ryan Dahl <ry@tinyclouds.org>  2011-12-14 15:02:32 -0800
commit    b3a7de15b7f06e11bd326b60b0e5ffd762ae71c5 (patch)
tree      5bd6feac02a7c9eed1fbc03fc678e952ab3a852f /deps/v8
parent    be23c51f6979ef5fd519069a62648d81f25b2ec0 (diff)
download  node-b3a7de15b7f06e11bd326b60b0e5ffd762ae71c5.tar.gz
Upgrade V8 to 3.8.0
Diffstat (limited to 'deps/v8')
-rw-r--r-- deps/v8/ChangeLog | 12
-rw-r--r-- deps/v8/build/common.gypi | 1
-rw-r--r-- deps/v8/include/v8-profiler.h | 3
-rw-r--r-- deps/v8/src/api.cc | 57
-rw-r--r-- deps/v8/src/arm/builtins-arm.cc | 7
-rw-r--r-- deps/v8/src/arm/code-stubs-arm.cc | 287
-rw-r--r-- deps/v8/src/arm/full-codegen-arm.cc | 8
-rw-r--r-- deps/v8/src/arm/ic-arm.cc | 3
-rw-r--r-- deps/v8/src/arm/lithium-arm.cc | 15
-rw-r--r-- deps/v8/src/arm/lithium-arm.h | 2
-rw-r--r-- deps/v8/src/arm/lithium-codegen-arm.cc | 227
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.cc | 15
-rw-r--r-- deps/v8/src/arm/macro-assembler-arm.h | 2
-rw-r--r-- deps/v8/src/arm/stub-cache-arm.cc | 20
-rw-r--r-- deps/v8/src/assembler.cc | 14
-rw-r--r-- deps/v8/src/ast.cc | 2
-rw-r--r-- deps/v8/src/ast.h | 8
-rw-r--r-- deps/v8/src/bootstrapper.cc | 2
-rw-r--r-- deps/v8/src/builtins.cc | 72
-rw-r--r-- deps/v8/src/code-stubs.cc | 61
-rw-r--r-- deps/v8/src/code-stubs.h | 28
-rw-r--r-- deps/v8/src/compiler.cc | 2
-rw-r--r-- deps/v8/src/debug-agent.cc | 2
-rw-r--r-- deps/v8/src/debug-agent.h | 1
-rw-r--r-- deps/v8/src/debug.cc | 34
-rw-r--r-- deps/v8/src/elements.cc | 38
-rw-r--r-- deps/v8/src/elements.h | 17
-rw-r--r-- deps/v8/src/factory.cc | 28
-rw-r--r-- deps/v8/src/factory.h | 13
-rw-r--r-- deps/v8/src/frames.cc | 17
-rw-r--r-- deps/v8/src/heap-inl.h | 3
-rw-r--r-- deps/v8/src/heap.cc | 115
-rw-r--r-- deps/v8/src/heap.h | 1
-rw-r--r-- deps/v8/src/hydrogen-instructions.cc | 5
-rw-r--r-- deps/v8/src/hydrogen-instructions.h | 54
-rw-r--r-- deps/v8/src/hydrogen.cc | 194
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.cc | 138
-rw-r--r-- deps/v8/src/ia32/assembler-ia32.h | 9
-rw-r--r-- deps/v8/src/ia32/builtins-ia32.cc | 50
-rw-r--r-- deps/v8/src/ia32/code-stubs-ia32.cc | 623
-rw-r--r-- deps/v8/src/ia32/debug-ia32.cc | 4
-rw-r--r-- deps/v8/src/ia32/deoptimizer-ia32.cc | 8
-rw-r--r-- deps/v8/src/ia32/disasm-ia32.cc | 47
-rw-r--r-- deps/v8/src/ia32/full-codegen-ia32.cc | 6
-rw-r--r-- deps/v8/src/ia32/ic-ia32.cc | 3
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.cc | 276
-rw-r--r-- deps/v8/src/ia32/lithium-codegen-ia32.h | 8
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.cc | 24
-rw-r--r-- deps/v8/src/ia32/lithium-ia32.h | 19
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.cc | 32
-rw-r--r-- deps/v8/src/ia32/macro-assembler-ia32.h | 7
-rw-r--r-- deps/v8/src/ia32/stub-cache-ia32.cc | 26
-rw-r--r-- deps/v8/src/ic-inl.h | 4
-rw-r--r-- deps/v8/src/ic.cc | 89
-rw-r--r-- deps/v8/src/ic.h | 27
-rw-r--r-- deps/v8/src/mark-compact.cc | 149
-rw-r--r-- deps/v8/src/mark-compact.h | 12
-rw-r--r-- deps/v8/src/messages.js | 1
-rw-r--r-- deps/v8/src/mips/code-stubs-mips.cc | 595
-rw-r--r-- deps/v8/src/mips/codegen-mips.cc | 92
-rw-r--r-- deps/v8/src/mips/codegen-mips.h | 15
-rw-r--r-- deps/v8/src/mips/full-codegen-mips.cc | 101
-rw-r--r-- deps/v8/src/mips/ic-mips.cc | 3
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.cc | 400
-rw-r--r-- deps/v8/src/mips/lithium-codegen-mips.h | 7
-rw-r--r-- deps/v8/src/mips/lithium-mips.cc | 29
-rw-r--r-- deps/v8/src/mips/lithium-mips.h | 18
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.cc | 15
-rw-r--r-- deps/v8/src/mips/macro-assembler-mips.h | 1
-rw-r--r-- deps/v8/src/mips/stub-cache-mips.cc | 20
-rw-r--r-- deps/v8/src/objects-inl.h | 161
-rw-r--r-- deps/v8/src/objects.cc | 138
-rw-r--r-- deps/v8/src/objects.h | 85
-rw-r--r-- deps/v8/src/parser.cc | 49
-rw-r--r-- deps/v8/src/parser.h | 5
-rw-r--r-- deps/v8/src/platform-posix.cc | 5
-rw-r--r-- deps/v8/src/platform-win32.cc | 5
-rw-r--r-- deps/v8/src/platform.h | 4
-rw-r--r-- deps/v8/src/preparser.cc | 1
-rw-r--r-- deps/v8/src/profile-generator-inl.h | 20
-rw-r--r-- deps/v8/src/profile-generator.cc | 242
-rw-r--r-- deps/v8/src/profile-generator.h | 31
-rw-r--r-- deps/v8/src/runtime.cc | 43
-rw-r--r-- deps/v8/src/scopes.cc | 43
-rw-r--r-- deps/v8/src/scopes.h | 5
-rw-r--r-- deps/v8/src/spaces.cc | 6
-rw-r--r-- deps/v8/src/store-buffer.cc | 55
-rw-r--r-- deps/v8/src/store-buffer.h | 4
-rw-r--r-- deps/v8/src/stub-cache.cc | 4
-rw-r--r-- deps/v8/src/stub-cache.h | 10
-rw-r--r-- deps/v8/src/type-info.cc | 15
-rw-r--r-- deps/v8/src/type-info.h | 1
-rw-r--r-- deps/v8/src/v8natives.js | 106
-rw-r--r-- deps/v8/src/v8threads.h | 2
-rw-r--r-- deps/v8/src/version.cc | 4
-rw-r--r-- deps/v8/src/x64/assembler-x64.cc | 166
-rw-r--r-- deps/v8/src/x64/assembler-x64.h | 7
-rw-r--r-- deps/v8/src/x64/builtins-x64.cc | 3
-rw-r--r-- deps/v8/src/x64/code-stubs-x64.cc | 414
-rw-r--r-- deps/v8/src/x64/debug-x64.cc | 4
-rw-r--r-- deps/v8/src/x64/deoptimizer-x64.cc | 8
-rw-r--r-- deps/v8/src/x64/disasm-x64.cc | 20
-rw-r--r-- deps/v8/src/x64/full-codegen-x64.cc | 2
-rw-r--r-- deps/v8/src/x64/ic-x64.cc | 3
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.cc | 261
-rw-r--r-- deps/v8/src/x64/lithium-codegen-x64.h | 3
-rw-r--r-- deps/v8/src/x64/lithium-x64.cc | 8
-rw-r--r-- deps/v8/src/x64/lithium-x64.h | 2
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.cc | 39
-rw-r--r-- deps/v8/src/x64/macro-assembler-x64.h | 8
-rw-r--r-- deps/v8/src/x64/stub-cache-x64.cc | 26
-rw-r--r-- deps/v8/test/cctest/SConscript | 3
-rw-r--r-- deps/v8/test/cctest/cctest.status | 10
-rw-r--r-- deps/v8/test/cctest/test-api.cc | 10
-rw-r--r-- deps/v8/test/cctest/test-assembler-ia32.cc | 68
-rw-r--r-- deps/v8/test/cctest/test-assembler-x64.cc | 81
-rw-r--r-- deps/v8/test/cctest/test-debug.cc | 2
-rw-r--r-- deps/v8/test/cctest/test-disasm-ia32.cc | 5
-rw-r--r-- deps/v8/test/cctest/test-disasm-x64.cc | 429
-rw-r--r-- deps/v8/test/cctest/test-heap-profiler.cc | 102
-rw-r--r-- deps/v8/test/cctest/test-heap.cc | 9
-rw-r--r-- deps/v8/test/cctest/test-mark-compact.cc | 92
-rwxr-xr-x deps/v8/test/cctest/test-parsing.cc | 198
-rw-r--r-- deps/v8/test/es5conform/es5conform.status | 5
-rw-r--r-- deps/v8/test/message/message.status | 7
-rw-r--r-- deps/v8/test/mjsunit/array-construct-transition.js | 39
-rw-r--r-- deps/v8/test/mjsunit/array-literal-transitions.js | 80
-rw-r--r-- deps/v8/test/mjsunit/compiler/regress-106351.js | 38
-rw-r--r-- deps/v8/test/mjsunit/elements-kind.js | 2
-rw-r--r-- deps/v8/test/mjsunit/elements-transition.js | 10
-rw-r--r-- deps/v8/test/mjsunit/harmony/block-const-assign.js | 131
-rw-r--r-- deps/v8/test/mjsunit/harmony/block-let-crankshaft.js | 198
-rw-r--r-- deps/v8/test/mjsunit/math-pow.js | 257
-rw-r--r-- deps/v8/test/mjsunit/mjsunit.status | 42
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-397.js | 17
-rw-r--r-- deps/v8/test/mjsunit/regress/regress-97116.js | 50
-rw-r--r-- deps/v8/test/mjsunit/string-external-cached.js | 6
-rwxr-xr-x deps/v8/test/mjsunit/string-slices.js | 17
-rw-r--r-- deps/v8/test/mozilla/mozilla.status | 70
-rw-r--r-- deps/v8/test/preparser/preparser.status | 6
-rw-r--r-- deps/v8/test/sputnik/sputnik.status | 13
-rw-r--r-- deps/v8/test/test262/test262.status | 203
-rwxr-xr-x deps/v8/tools/test-wrapper-gypbuild.py | 2
143 files changed, 5935 insertions, 2513 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 4d629810b..33df4603c 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,15 @@
+2011-12-13: Version 3.8.0
+
+ Fixed handling of arrays in DefineOwnProperty. (issue 1756)
+
+ Sync parser and preparser on do-while and return statements.
+ (issue 1856)
+
+ Fixed another corner case for DefineOwnProperty on arrays (issue 1756).
+
+ Stability and performance improvements on all platforms.
+
+
2011-12-01: Version 3.7.12
Increase tick interval for the android platform.
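
The two DefineOwnProperty entries refer to the ES5 [[DefineOwnProperty]] algorithm for arrays (ES5 15.4.5.1), where redefining 'length' interacts with non-configurable elements. A minimal sketch of the kind of corner case involved (illustrative only, not the exact reproduction from issue 1756):

    // Shrinking 'length' deletes elements from the end, one by one;
    // a non-configurable element stops the truncation and throws.
    var a = [0, 1, 2, 3];
    Object.defineProperty(a, 1, { value: 1, configurable: false });
    try {
      Object.defineProperty(a, 'length', { value: 0 });
    } catch (e) {
      // TypeError: deletion failed at index 1
    }
    // Elements 3 and 2 are gone, element 1 survives: a.length === 2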
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 861c87d29..9129d0170 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -303,6 +303,7 @@
}],
['OS=="win"', {
'msvs_configuration_attributes': {
+ 'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
'CharacterSet': '1',
},
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index f67646f54..27b3c6def 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -219,8 +219,9 @@ class V8EXPORT HeapGraphEdge {
// (e.g. parts of a ConsString).
kHidden = 4, // A link that is needed for proper sizes
// calculation, but may be hidden from user.
- kShortcut = 5 // A link that must not be followed during
+ kShortcut = 5, // A link that must not be followed during
// sizes calculation.
+ kWeak = 6 // A weak reference (ignored by the GC).
};
/** Returns edge type (see HeapGraphEdge::Type). */
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 35b8aa0fc..7eaadbb6b 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1462,31 +1462,35 @@ Local<Script> Script::New(v8::Handle<String> source,
ON_BAILOUT(isolate, "v8::Script::New()", return Local<Script>());
LOG_API(isolate, "Script::New");
ENTER_V8(isolate);
- i::Handle<i::String> str = Utils::OpenHandle(*source);
- i::Handle<i::Object> name_obj;
- int line_offset = 0;
- int column_offset = 0;
- if (origin != NULL) {
- if (!origin->ResourceName().IsEmpty()) {
- name_obj = Utils::OpenHandle(*origin->ResourceName());
- }
- if (!origin->ResourceLineOffset().IsEmpty()) {
- line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+ i::SharedFunctionInfo* raw_result = NULL;
+ { i::HandleScope scope(isolate);
+ i::Handle<i::String> str = Utils::OpenHandle(*source);
+ i::Handle<i::Object> name_obj;
+ int line_offset = 0;
+ int column_offset = 0;
+ if (origin != NULL) {
+ if (!origin->ResourceName().IsEmpty()) {
+ name_obj = Utils::OpenHandle(*origin->ResourceName());
+ }
+ if (!origin->ResourceLineOffset().IsEmpty()) {
+ line_offset = static_cast<int>(origin->ResourceLineOffset()->Value());
+ }
+ if (!origin->ResourceColumnOffset().IsEmpty()) {
+ column_offset =
+ static_cast<int>(origin->ResourceColumnOffset()->Value());
+ }
}
- if (!origin->ResourceColumnOffset().IsEmpty()) {
- column_offset = static_cast<int>(origin->ResourceColumnOffset()->Value());
+ EXCEPTION_PREAMBLE(isolate);
+ i::ScriptDataImpl* pre_data_impl =
+ static_cast<i::ScriptDataImpl*>(pre_data);
+ // We assert that the pre-data is sane, even though we can actually
+ // handle it if it turns out not to be in release mode.
+ ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
+ // If the pre-data isn't sane we simply ignore it
+ if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
+ pre_data_impl = NULL;
}
- }
- EXCEPTION_PREAMBLE(isolate);
- i::ScriptDataImpl* pre_data_impl = static_cast<i::ScriptDataImpl*>(pre_data);
- // We assert that the pre-data is sane, even though we can actually
- // handle it if it turns out not to be in release mode.
- ASSERT(pre_data_impl == NULL || pre_data_impl->SanityCheck());
- // If the pre-data isn't sane we simply ignore it
- if (pre_data_impl != NULL && !pre_data_impl->SanityCheck()) {
- pre_data_impl = NULL;
- }
- i::Handle<i::SharedFunctionInfo> result =
+ i::Handle<i::SharedFunctionInfo> result =
i::Compiler::Compile(str,
name_obj,
line_offset,
@@ -1495,8 +1499,11 @@ Local<Script> Script::New(v8::Handle<String> source,
pre_data_impl,
Utils::OpenHandle(*script_data),
i::NOT_NATIVES_CODE);
- has_pending_exception = result.is_null();
- EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+ has_pending_exception = result.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+ raw_result = *result;
+ }
+ i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
return Local<Script>(ToApi<Script>(result));
}
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index d0136f550..0e28241b5 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -394,13 +394,18 @@ static void ArrayNativeCode(MacroAssembler* masm,
// r5: elements_array_end (untagged)
// sp[0]: last argument
Label loop, entry;
+ __ mov(r7, sp);
__ jmp(&entry);
__ bind(&loop);
- __ ldr(r2, MemOperand(sp, kPointerSize, PostIndex));
+ __ ldr(r2, MemOperand(r7, kPointerSize, PostIndex));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(r2, call_generic_code);
+ }
__ str(r2, MemOperand(r5, -kPointerSize, PreIndex));
__ bind(&entry);
__ cmp(r4, r5);
__ b(lt, &loop);
+ __ mov(sp, r7);
// Remove caller arguments and receiver from the stack, setup return value and
// return.
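
The new JumpIfNotSmi bailout keeps the native Array-constructor path valid for the smi-only elements representation: as soon as an argument is not a small integer, the stub falls back to generic code instead of storing a non-smi into a FAST_SMI_ONLY_ELEMENTS backing store. A JS-level sketch (the %-natives are assumptions, usable via d8 --allow-natives-syntax):

    var smis  = new Array(1, 2, 3);    // can stay on the smi-only fast path
    var mixed = new Array(1, 'x', 3);  // non-smi argument forces the
                                       // generic path and FAST_ELEMENTS
    // %HasFastSmiOnlyElements(smis)  => expected true
    // %HasFastSmiOnlyElements(mixed) => expected false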
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 8b1d0c4b3..282df1565 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -3455,110 +3455,202 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (CpuFeatures::IsSupported(VFP3)) {
- CpuFeatures::Scope scope(VFP3);
-
- Label base_not_smi;
- Label exponent_not_smi;
- Label convert_exponent;
-
- const Register base = r0;
- const Register exponent = r1;
- const Register heapnumbermap = r5;
- const Register heapnumber = r6;
- const DoubleRegister double_base = d0;
- const DoubleRegister double_exponent = d1;
- const DoubleRegister double_result = d2;
- const SwVfpRegister single_scratch = s0;
- const Register scratch = r9;
- const Register scratch2 = r7;
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ CpuFeatures::Scope vfp3_scope(VFP3);
+ const Register base = r1;
+ const Register exponent = r2;
+ const Register heapnumbermap = r5;
+ const Register heapnumber = r0;
+ const DoubleRegister double_base = d1;
+ const DoubleRegister double_exponent = d2;
+ const DoubleRegister double_result = d3;
+ const DoubleRegister double_scratch = d0;
+ const SwVfpRegister single_scratch = s0;
+ const Register scratch = r9;
+ const Register scratch2 = r7;
+
+ Label call_runtime, done, exponent_not_smi, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
__ ldr(base, MemOperand(sp, 1 * kPointerSize));
__ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
- // Convert base to double value and store it in d0.
- __ JumpIfNotSmi(base, &base_not_smi);
- // Base is a Smi. Untag and convert it.
- __ SmiUntag(base);
- __ vmov(single_scratch, base);
- __ vcvt_f64_s32(double_base, single_scratch);
- __ b(&convert_exponent);
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ bind(&base_not_smi);
+ __ JumpIfSmi(base, &base_is_smi);
__ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
- // Base is a heapnumber. Load it into double register.
+
__ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ vmov(single_scratch, base);
+ __ vcvt_f64_s32(double_base, single_scratch);
+ __ bind(&unpack_exponent);
- __ bind(&convert_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
-
- // The base is in a double register and the exponent is
- // an untagged smi. Allocate a heap number and call a
- // C function for integer exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(lr);
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()),
- 1, 1);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
- }
- __ vstr(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
+ __ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ cmp(scratch, heapnumbermap);
__ b(ne, &call_runtime);
- // Exponent is a heapnumber. Load it into double register.
__ vldr(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ vldr(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ vcvt_u32_f64(single_scratch, double_exponent);
+ // We do not check for NaN or Infinity here because comparing numbers on
+ // ARM correctly distinguishes NaNs. We end up calling the built-in.
+ __ vcvt_f64_u32(double_scratch, single_scratch);
+ __ VFPCompareAndSetFlags(double_scratch, double_exponent);
+ __ b(eq, &int_exponent_convert);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half;
+
+ // Test for 0.5.
+ __ vmov(double_scratch, 0.5);
+ __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+ __ b(ne, &not_plus_half);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ vmov(double_scratch, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(double_base, double_scratch);
+ __ vneg(double_result, double_scratch, eq);
+ __ b(eq, &done);
+
+ // Add +0 to convert -0 to +0.
+ __ vadd(double_scratch, double_base, kDoubleRegZero);
+ __ vsqrt(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&not_plus_half);
+ __ vmov(double_scratch, -0.5);
+ __ VFPCompareAndSetFlags(double_exponent, double_scratch);
+ __ b(ne, &call_runtime);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ vmov(double_scratch, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(double_base, double_scratch);
+ __ vmov(double_result, kDoubleRegZero, eq);
+ __ b(eq, &done);
+
+ // Add +0 to convert -0 to +0.
+ __ vadd(double_scratch, double_base, kDoubleRegZero);
+ __ vmov(double_result, 1);
+ __ vsqrt(double_scratch, double_scratch);
+ __ vdiv(double_result, double_result, double_scratch);
+ __ jmp(&done);
+ }
- // The base and the exponent are in double registers.
- // Allocate a heap number and call a C function for
- // double exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
__ push(lr);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
0, 2);
- __ pop(lr);
- __ GetCFunctionDoubleResult(double_result);
}
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
+ __ vcvt_u32_f64(single_scratch, double_exponent);
+ __ vmov(exponent, single_scratch);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ __ mov(scratch, exponent); // Back up exponent.
+ __ vmov(double_scratch, double_base); // Back up base.
+ __ vmov(double_result, 1.0);
+
+ // Get absolute value of exponent.
+ __ cmp(scratch, Operand(0));
+ __ mov(scratch2, Operand(0), LeaveCC, mi);
+ __ sub(scratch, scratch2, scratch, LeaveCC, mi);
+
+ Label while_true;
+ __ bind(&while_true);
+ __ mov(scratch, Operand(scratch, ASR, 1), SetCC);
+ __ vmul(double_result, double_result, double_scratch, cs);
+ __ vmul(double_scratch, double_scratch, double_scratch, ne);
+ __ b(ne, &while_true);
+
+ __ cmp(exponent, Operand(0));
+ __ b(ge, &done);
+ __ vmov(double_scratch, 1.0);
+ __ vdiv(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ VFPCompareAndSetFlags(double_result, 0.0);
+ __ b(ne, &done);
+ // double_exponent may not containe the exponent value if the input was a
+ // smi. We set it with exponent value before bailing out.
+ __ vmov(single_scratch, exponent);
+ __ vcvt_f64_s32(double_exponent, single_scratch);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as heap number in exponent.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ vstr(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(r0, heapnumber);
- __ Ret(2 * kPointerSize);
- }
+ ASSERT(heapnumber.is(r0));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret(2);
+ } else {
+ __ push(lr);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(lr);
+ __ GetCFunctionDoubleResult(double_result);
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
}
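
The ON_STACK square-root shortcuts above encode the ECMA-262 15.8.2.13 corner cases that make Math.pow(x, ±0.5) differ from a plain hardware square root; in JS terms:

    Math.pow(-Infinity, 0.5);   // Infinity (but Math.sqrt(-Infinity) is NaN)
    Math.pow(-Infinity, -0.5);  // 0
    Math.pow(-0, 0.5);          // +0, hence the "add +0" before vsqrt,
                                // since sqrt(-0) would yield -0
    1 / Math.pow(-0, 0.5);      // Infinity, confirming the sign is +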
@@ -6628,26 +6720,47 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- __ Push(r1, r0);
- __ push(lr);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ and_(r2, r1, Operand(r0));
+ __ JumpIfSmi(r2, &miss);
+ __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
+ __ cmp(r2, Operand(known_map_));
+ __ b(ne, &miss);
+ __ cmp(r3, Operand(known_map_));
+ __ b(ne, &miss);
+
+ __ sub(r0, r0, Operand(r1));
+ __ Ret();
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(r1, r0);
+ __ push(lr);
+ __ Push(r1, r0);
__ mov(ip, Operand(Smi::FromInt(op_)));
__ push(ip);
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ pop(lr);
+ __ pop(r0);
+ __ pop(r1);
}
- // Compute the entry point of the rewritten stub.
- __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(lr);
- __ pop(r0);
- __ pop(r1);
+
__ Jump(r2);
}
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index fdd326618..7e9a88911 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -2938,8 +2938,12 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
+ if (CpuFeatures::IsSupported(VFP3)) {
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ }
context()->Plug(r0);
}
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index f8e4bbb6b..abbac993d 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1587,6 +1587,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 234177476..1ac152d6a 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -1153,6 +1153,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
+ } else if (op == kMathPowHalf) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LOperand* temp = FixedTemp(d3);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1166,8 +1171,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1402,7 +1405,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* left = UseFixedDouble(instr->left(), d1);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), d2) :
- UseFixed(instr->right(), r0);
+ UseFixed(instr->right(), r2);
LPower* result = new LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, d3),
instr,
@@ -1795,7 +1798,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1809,7 +1813,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
- return new LStoreContextSlot(context, value);
+ LInstruction* result = new LStoreContextSlot(context, value);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
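
Making these instructions carry an environment lets them deoptimize on the hole value: under harmony block scoping, the context slot of a let binding holds the-hole until its declaration executes, and touching it earlier must raise an error rather than continue in optimized code. A sketch of the guarded behavior (flag name and exact error are assumptions for this vintage, e.g. d8 --harmony-scoping):

    {
      var read = function () { return x; };
      try {
        read();        // x's context slot still holds the hole: throws
      } catch (e) {}
      let x = 1;
      read();          // 1, once the declaration has executed
    }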
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 6051ad973..628c3d1a5 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -1793,6 +1793,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 22a504fc6..25532a2d9 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -321,7 +321,22 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
- __ mov(scratch, ToOperand(op));
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("EmitLoadRegister: Unsupported double immediate.");
+ } else {
+ ASSERT(r.IsTagged());
+ if (literal->IsSmi()) {
+ __ mov(scratch, Operand(literal));
+ } else {
+ __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+ }
+ }
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ ldr(scratch, ToMemOperand(op));
@@ -1337,8 +1352,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ mov(ToRegister(instr->result()), Operand(instr->value()));
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ mov(ToRegister(instr->result()), Operand(value));
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -2164,7 +2184,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(r4));
- __ mov(InstanceofStub::right(), Operand(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 4;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2263,21 +2283,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ str(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-
- // Cells are always in the remembered set.
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteField(scratch,
- JSGlobalPropertyCell::kValueOffset,
- value,
- scratch2,
- kLRHasBeenSaved,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- check_needed);
- }
+ // Cells are always rescanned, so no write barrier here.
}
@@ -2297,6 +2303,11 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ ldr(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, ip);
+ DeoptimizeIf(eq, instr->environment());
+ }
}
@@ -2304,6 +2315,13 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = scratch0();
+ __ ldr(scratch, target);
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ DeoptimizeIf(eq, instr->environment());
+ }
__ str(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@@ -2355,7 +2373,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2800,7 +2818,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2868,7 +2886,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->function()));
+ __ LoadHeapObject(r1, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -3053,11 +3071,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ and_(scratch, result, Operand(HeapNumber::kSignMask));
__ Vmov(double_scratch0(), 0.5);
- __ vadd(input, input, double_scratch0());
+ __ vadd(double_scratch0(), input, double_scratch0());
// Check sign of the result: if the sign changed, the input
// value was in ]0.5, 0[ and the result should be -0.
- __ vmov(result, input.high());
+ __ vmov(result, double_scratch0().high());
__ eor(result, result, Operand(scratch), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
DeoptimizeIf(mi, instr->environment());
@@ -3068,7 +3086,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ EmitVFPTruncate(kRoundToMinusInf,
double_scratch0().low(),
- input,
+ double_scratch0(),
result,
scratch);
DeoptimizeIf(ne, instr->environment());
@@ -3097,68 +3115,53 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
+ DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done;
+ __ vmov(temp, -V8_INFINITY);
+ __ VFPCompareAndSetFlags(input, temp);
+ __ vneg(result, temp, eq);
+ __ b(&done, eq);
+
// Add +0 to convert -0 to +0.
__ vadd(result, input, kDoubleRegZero);
__ vsqrt(result, result);
+ __ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- Register scratch = scratch0();
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left),
- ToDoubleRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
- } else if (exponent_type.IsInteger32()) {
- ASSERT(ToRegister(right).is(r0));
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 1, 1);
- } else {
- ASSERT(exponent_type.IsTagged());
- ASSERT(instr->hydrogen()->left()->representation().IsDouble());
-
- Register right_reg = ToRegister(right);
-
- // Check for smi on the right hand side.
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
-
- // Untag smi and convert it to a double.
- __ SmiUntag(right_reg);
- SwVfpRegister single_scratch = double_scratch0().low();
- __ vmov(single_scratch, right_reg);
- __ vcvt_f64_s32(result_reg, single_scratch);
- __ jmp(&call);
-
- // Heap number map check.
- __ bind(&non_smi);
- __ ldr(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(d2));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(r2));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(d1));
+ ASSERT(ToDoubleRegister(instr->result()).is(d3));
+
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(r2, &no_deopt);
+ __ ldr(r7, FieldMemOperand(r2, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(scratch, Operand(ip));
+ __ cmp(r7, Operand(ip));
DeoptimizeIf(ne, instr->environment());
- int32_t value_offset = HeapNumber::kValueOffset - kHeapObjectTag;
- __ add(scratch, right_reg, Operand(value_offset));
- __ vldr(result_reg, scratch, 0);
-
- // Prepare arguments and call C function.
- __ bind(&call);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Store the result in the result register.
- __ GetCFunctionDoubleResult(result_reg);
}
@@ -3294,7 +3297,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(r0));
- __ mov(r1, Operand(instr->target()));
+ __ LoadHeapObject(r1, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -4118,9 +4121,18 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ cmp(reg, Operand(instr->hydrogen()->target()));
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Register reg = ToRegister(instr->value());
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ mov(ip, Operand(Handle<Object>(cell)));
+ __ ldr(ip, FieldMemOperand(ip, JSGlobalPropertyCell::kValueOffset));
+ __ cmp(reg, ip);
+ } else {
+ __ cmp(reg, Operand(target));
+ }
DeoptimizeIf(ne, instr->environment());
}
@@ -4189,19 +4201,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand(cell));
- __ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ mov(result, Operand(object));
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@@ -4210,7 +4209,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -4220,7 +4219,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
@@ -4231,15 +4230,31 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(r1, instr->hydrogen()->boilerplate_object());
+ // Load map into r2.
+ __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ __ cmp(r2, Operand(boilerplate_elements_kind));
+ DeoptimizeIf(ne, instr->environment());
+ }
__ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ mov(r1, Operand(constant_elements));
+ // Boilerplate already exists, constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ mov(r1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
@@ -4256,9 +4271,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
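
The deopt guard exists because each array literal site shares one boilerplate object, and optimized code bakes in the boilerplate's ElementsKind at compile time. A JS sketch of a transition that would invalidate it (whether a given store propagates back to the boilerplate is an implementation detail asserted only loosely here; compare the new array-literal-transitions.js test):

    function make(x) { return [1, 2, x]; }
    make(3);    // literal copies stay smi-only
    make(1.5);  // stores a double; if the boilerplate transitions to
                // double elements, optimized code holding the old kind
                // hits the map check above and deoptimizes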
@@ -4297,10 +4312,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(r2, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
@@ -4326,7 +4341,7 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(r1, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset);
ASSERT_EQ(size, offset);
}
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index 4fc3b03ab..59a5e5ba7 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -407,6 +407,19 @@ void MacroAssembler::StoreRoot(Register source,
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ mov(result, Operand(cell));
+ ldr(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ mov(result, Operand(object));
+ }
+}
+
+
void MacroAssembler::InNewSpace(Register object,
Register scratch,
Condition cond,
@@ -1111,7 +1124,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(r1, Operand(function));
+ LoadHeapObject(r1, function);
ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 2725883ee..9d7463322 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -166,6 +166,8 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond = al);
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
+
// ---------------------------------------------------------------------------
// GC Support
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index 47778f580..b6b2ee2f0 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -575,7 +575,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(r5, Operand(function));
+ __ LoadHeapObject(r5, function);
__ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -1099,7 +1099,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1110,7 +1110,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(r0, Operand(value));
+ __ LoadHeapObject(r0, value);
__ Ret();
}
@@ -2587,15 +2587,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
-
- __ mov(r1, r0);
- __ RecordWriteField(r4,
- JSGlobalPropertyCell::kValueOffset,
- r1,
- r2,
- kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET);
+ // Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, r4, r3);
@@ -2690,7 +2682,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -2830,7 +2822,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index bc05c0180..941f45c21 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -1113,17 +1113,9 @@ double power_double_int(double x, int y) {
double power_double_double(double x, double y) {
- int y_int = static_cast<int>(y);
- if (y == y_int) {
- return power_double_int(x, y_int); // Returns 1.0 for exponent 0.
- }
- if (!isinf(x)) {
- if (y == 0.5) return sqrt(x + 0.0); // -0 must be converted to +0.
- if (y == -0.5) return 1.0 / sqrt(x + 0.0);
- }
- if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return OS::nan_value();
- }
+ // The checks for special cases can be dropped in ia32 because they have
+ // already been done in generated code before bailing out here.
+ if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) return OS::nan_value();
return pow(x, y);
}
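
The subtlety that keeps the negative-exponent bailout (visible in the ARM stub earlier) is that x^-y == (1/x)^y does not hold once results go subnormal: the reciprocal form can round to zero. Illustrated in JS:

    Math.pow(2, -1074);     // 5e-324, the smallest subnormal double
    1 / Math.pow(2, 1074);  // Math.pow(2, 1074) overflows to Infinity,
                            // so this is 0, not 5e-324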
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 13e55894d..079335622 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -70,6 +70,7 @@ VariableProxy::VariableProxy(Isolate* isolate, Variable* var)
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
is_trivial_(false),
+ is_lvalue_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
}
@@ -84,6 +85,7 @@ VariableProxy::VariableProxy(Isolate* isolate,
var_(NULL),
is_this_(is_this),
is_trivial_(false),
+ is_lvalue_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index 805526af5..9b90d816d 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -1159,12 +1159,17 @@ class VariableProxy: public Expression {
bool IsArguments() { return var_ != NULL && var_->is_arguments(); }
+ bool IsLValue() {
+ return is_lvalue_;
+ }
+
Handle<String> name() const { return name_; }
Variable* var() const { return var_; }
bool is_this() const { return is_this_; }
int position() const { return position_; }
void MarkAsTrivial() { is_trivial_ = true; }
+ void MarkAsLValue() { is_lvalue_ = true; }
// Bind this proxy to the variable var.
void BindTo(Variable* var);
@@ -1174,6 +1179,9 @@ class VariableProxy: public Expression {
Variable* var_; // resolved variable, or NULL
bool is_this_;
bool is_trivial_;
+ // True if this variable proxy is being used in an assignment
+ // or with an increment/decrement operator.
+ bool is_lvalue_;
int position_;
};
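
The parser sets the flag via MarkAsLValue wherever a proxy is an assignment target, which the new harmony const-assignment checks (see block-const-assign.js in the diffstat) rely on. Roughly which positions count:

    x = 1;      // proxy for x is an lvalue
    x += 2;     // lvalue (compound assignment)
    x++;        // lvalue (count operation)
    var y = x;  // plain read: not an lvalue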
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 29c16ee93..6d388a568 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -299,7 +299,7 @@ class Genesis BASE_EMBEDDED {
void Bootstrapper::Iterate(ObjectVisitor* v) {
extensions_cache_.Iterate(v);
- v->Synchronize("Extensions");
+ v->Synchronize(VisitorSynchronization::kExtensions);
}
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index 43cf358d4..4d874c54f 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -233,30 +233,57 @@ BUILTIN(ArrayCodeGeneric) {
return array->Initialize(JSArray::kPreallocatedArrayElements);
}
- // Take the arguments as elements.
- int number_of_elements = args.length() - 1;
- Smi* len = Smi::FromInt(number_of_elements);
- Object* obj;
- { MaybeObject* maybe_obj = heap->AllocateFixedArrayWithHoles(len->value());
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- }
-
// Set length and elements on the array.
+ int number_of_elements = args.length() - 1;
MaybeObject* maybe_object =
- array->EnsureCanContainElements(FixedArray::cast(obj));
+ array->EnsureCanContainElements(&args, 1, number_of_elements,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
- AssertNoAllocation no_gc;
- FixedArray* elms = FixedArray::cast(obj);
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- // Fill in the content
- for (int index = 0; index < number_of_elements; index++) {
- elms->set(index, args[index+1], mode);
+ // Allocate an appropriately typed elements array.
+ MaybeObject* maybe_elms;
+ ElementsKind elements_kind = array->GetElementsKind();
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ maybe_elms = heap->AllocateUninitializedFixedDoubleArray(
+ number_of_elements);
+ } else {
+ maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
}
+ FixedArrayBase* elms;
+ if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
- array->set_elements(FixedArray::cast(obj));
- array->set_length(len);
+ // Fill in the content
+ switch (array->GetElementsKind()) {
+ case FAST_SMI_ONLY_ELEMENTS: {
+ FixedArray* smi_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ smi_elms->set(index, args[index+1], SKIP_WRITE_BARRIER);
+ }
+ break;
+ }
+ case FAST_ELEMENTS: {
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ FixedArray* object_elms = FixedArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ object_elms->set(index, args[index+1], mode);
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS: {
+ FixedDoubleArray* double_elms = FixedDoubleArray::cast(elms);
+ for (int index = 0; index < number_of_elements; index++) {
+ double_elms->set(index, args[index+1]->Number());
+ }
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ array->set_elements(elms);
+ array->set_length(Smi::FromInt(number_of_elements));
return array;
}
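
With the per-kind loops above, the builtin allocates a backing store matching the kind chosen by EnsureCanContainElements: smi stores skip the write barrier, double stores go into an unboxed FixedDoubleArray, and object stores use the full write barrier. From JS the difference is only the representation the result starts in:

    var s = new Array(1, 2, 3);    // smi-only FixedArray, no barrier
    var d = new Array(1.5, 2.5);   // FixedDoubleArray of unboxed doubles
    var o = new Array('a', {});    // FixedArray written with the barrier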
@@ -424,7 +451,8 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
MaybeObject* maybe_array = array->EnsureCanContainElements(
args,
first_added_arg,
- args_length - first_added_arg);
+ args_length - first_added_arg,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_array->IsFailure()) return maybe_array;
return array->elements();
}
@@ -627,7 +655,8 @@ BUILTIN(ArrayUnshift) {
ASSERT(to_add <= (Smi::kMaxValue - len));
MaybeObject* maybe_object =
- array->EnsureCanContainElements(&args, 1, to_add);
+ array->EnsureCanContainElements(&args, 1, to_add,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
if (new_length > elms->length()) {
@@ -758,7 +787,8 @@ BUILTIN(ArraySlice) {
FixedArray* result_elms = FixedArray::cast(result);
MaybeObject* maybe_object =
- result_array->EnsureCanContainElements(result_elms);
+ result_array->EnsureCanContainElements(result_elms,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
if (maybe_object->IsFailure()) return maybe_object;
AssertNoAllocation no_gc;
@@ -1022,7 +1052,7 @@ BUILTIN(ArrayConcat) {
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
if (!array->HasFastSmiOnlyElements()) {
- result_array->EnsureCanContainNonSmiElements();
+ result_array->EnsureCanContainHeapObjectElements();
break;
}
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index ba7df802f..85410c3cc 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -101,7 +101,14 @@ Handle<Code> CodeStub::GetCode() {
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
- if (!FindCodeInCache(&code)) {
+ if (UseSpecialCache()
+ ? FindCodeInSpecialCache(&code)
+ : FindCodeInCache(&code)) {
+ ASSERT(IsPregenerated() == code->is_pregenerated());
+ return Handle<Code>(code);
+ }
+
+ {
HandleScope scope(isolate);
// Generate the new code.
@@ -121,19 +128,21 @@ Handle<Code> CodeStub::GetCode() {
RecordCodeGeneration(*new_object, &masm);
FinishCode(new_object);
- // Update the dictionary and the root in Heap.
- Handle<NumberDictionary> dict =
- factory->DictionaryAtNumberPut(
- Handle<NumberDictionary>(heap->code_stubs()),
- GetKey(),
- new_object);
- heap->public_set_code_stubs(*dict);
+ if (UseSpecialCache()) {
+ AddToSpecialCache(new_object);
+ } else {
+ // Update the dictionary and the root in Heap.
+ Handle<NumberDictionary> dict =
+ factory->DictionaryAtNumberPut(
+ Handle<NumberDictionary>(heap->code_stubs()),
+ GetKey(),
+ new_object);
+ heap->public_set_code_stubs(*dict);
+ }
code = *new_object;
- Activate(code);
- } else {
- CHECK(IsPregenerated() == code->is_pregenerated());
}
+ Activate(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
return Handle<Code>(code, isolate);
}
@@ -159,6 +168,32 @@ void CodeStub::PrintName(StringStream* stream) {
}
+void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
+ ASSERT(*known_map_ != NULL);
+ Isolate* isolate = new_object->GetIsolate();
+ Factory* factory = isolate->factory();
+ return Map::UpdateCodeCache(known_map_,
+ factory->compare_ic_symbol(),
+ new_object);
+}
+
+
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+ Isolate* isolate = known_map_->GetIsolate();
+ Factory* factory = isolate->factory();
+ Code::Flags flags = Code::ComputeFlags(
+ static_cast<Code::Kind>(GetCodeKind()),
+ UNINITIALIZED);
+ Handle<Object> probe(
+ known_map_->FindInCodeCache(*factory->compare_ic_symbol(), flags));
+ if (probe->IsCode()) {
+ *code_out = Code::cast(*probe);
+ return true;
+ }
+ return false;
+}
+
+
int ICCompareStub::MinorKey() {
return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
@@ -184,6 +219,10 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
+ case CompareIC::KNOWN_OBJECTS:
+ ASSERT(*known_map_ != NULL);
+ GenerateKnownObjects(masm);
+ break;
default:
UNREACHABLE();
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 6bda5da70..34da148e5 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -194,6 +194,17 @@ class CodeStub BASE_EMBEDDED {
return UNINITIALIZED;
}
+ // Add the code to a specialized cache, specific to an individual
+ // stub type. Please note, this method must add the code object to a
+ // roots object, otherwise we will remove the code during GC.
+ virtual void AddToSpecialCache(Handle<Code> new_object) { }
+
+ // Find code in a specialized cache, work is delegated to the specific stub.
+ virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
+
+ // If a stub uses a special cache override this.
+ virtual bool UseSpecialCache() { return false; }
+
// Returns a name for logging/debugging purposes.
SmartArrayPointer<const char> GetName();
virtual void PrintName(StringStream* stream);
@@ -442,12 +453,17 @@ class InstanceofStub: public CodeStub {
class MathPowStub: public CodeStub {
public:
- MathPowStub() {}
+ enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
+
+ explicit MathPowStub(ExponentType exponent_type)
+ : exponent_type_(exponent_type) { }
virtual void Generate(MacroAssembler* masm);
private:
virtual CodeStub::Major MajorKey() { return MathPow; }
- virtual int MinorKey() { return 0; }
+ virtual int MinorKey() { return exponent_type_; }
+
+ ExponentType exponent_type_;
};
@@ -460,6 +476,8 @@ class ICCompareStub: public CodeStub {
virtual void Generate(MacroAssembler* masm);
+ void set_known_map(Handle<Map> map) { known_map_ = map; }
+
private:
class OpField: public BitField<int, 0, 3> { };
class StateField: public BitField<int, 3, 5> { };
@@ -479,12 +497,18 @@ class ICCompareStub: public CodeStub {
void GenerateStrings(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
+ void GenerateKnownObjects(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+ virtual void AddToSpecialCache(Handle<Code> new_object);
+ virtual bool FindCodeInSpecialCache(Code** code_out);
+ virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
+
Token::Value op_;
CompareIC::State state_;
+ Handle<Map> known_map_;
};
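
For stubs that stay on the shared path, every variant-distinguishing datum must be squeezed into MinorKey(): MathPowStub now encodes its ExponentType there, and ICCompareStub packs op_ and state_ through the BitField helpers declared above (3 bits at offset 0, 5 bits at offset 3). A simplified, self-contained version of that packing (V8's real BitField also validates that values fit):

    // Pack a small value into `size` bits starting at bit `shift`.
    template <class T, int shift, int size>
    struct BitField {
      static int encode(T value) { return static_cast<int>(value) << shift; }
      static T decode(int key) {
        return static_cast<T>((key >> shift) & ((1 << size) - 1));
      }
    };

    struct OpField : BitField<int, 0, 3> {};     // bits 0..2
    struct StateField : BitField<int, 3, 5> {};  // bits 3..7

    // Shape of ICCompareStub::MinorKey(): fields OR-ed into one integer.
    int MinorKey(int op_minus_eq, int state) {
      return OpField::encode(op_minus_eq) | StateField::encode(state);
    }
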
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 16ccfa0cf..d2a4a0bfd 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -398,7 +398,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
- isolate->StackOverflow();
+ if (!isolate->has_pending_exception()) isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
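
The compiler.cc change stops a stack-overflow report from masking a more specific failure: MakeCode() can itself schedule an exception, and unconditionally calling StackOverflow() afterwards would overwrite it. A minimal sketch of the guard with hypothetical stand-in types:

    // Only fall back to the generic stack-overflow error when compilation
    // failed without already raising something.
    struct IsolateLike {
      bool has_pending_exception = false;
      void StackOverflow() { has_pending_exception = true; }
    };

    bool CompileOrReportOverflow(IsolateLike* isolate, bool (*make_code)()) {
      if (!make_code()) {
        if (!isolate->has_pending_exception) isolate->StackOverflow();
        return false;  // Caller sees a null SharedFunctionInfo.
      }
      return true;
    }
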
diff --git a/deps/v8/src/debug-agent.cc b/deps/v8/src/debug-agent.cc
index 591d0b3e4..c30afa85d 100644
--- a/deps/v8/src/debug-agent.cc
+++ b/deps/v8/src/debug-agent.cc
@@ -229,8 +229,6 @@ void DebuggerAgentSession::Shutdown() {
const char* const DebuggerAgentUtil::kContentLength = "Content-Length";
-const int DebuggerAgentUtil::kContentLengthSize =
- StrLength(kContentLength);
SmartArrayPointer<char> DebuggerAgentUtil::ReceiveMessage(const Socket* conn) {
diff --git a/deps/v8/src/debug-agent.h b/deps/v8/src/debug-agent.h
index a07fb0f49..61151900f 100644
--- a/deps/v8/src/debug-agent.h
+++ b/deps/v8/src/debug-agent.h
@@ -115,7 +115,6 @@ class DebuggerAgentSession: public Thread {
class DebuggerAgentUtil {
public:
static const char* const kContentLength;
- static const int kContentLengthSize;
static SmartArrayPointer<char> ReceiveMessage(const Socket* conn);
static bool SendConnectMessage(const Socket* conn,
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index c654dfbd2..1e970e150 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -1796,8 +1796,9 @@ void Debug::PrepareForBreakPoints() {
}
} else if (frame->function()->IsJSFunction()) {
JSFunction* function = JSFunction::cast(frame->function());
- if (function->code()->kind() == Code::FUNCTION &&
- !function->code()->has_debug_break_slots()) {
+ ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+ if (!frame->LookupCode()->has_debug_break_slots() ||
+ !function->shared()->code()->has_debug_break_slots()) {
active_functions.Add(Handle<JSFunction>(function));
}
}
@@ -1853,20 +1854,16 @@ void Debug::PrepareForBreakPoints() {
if (function->code() == *lazy_compile) {
function->set_code(shared->code());
}
- Handle<Code> current_code(function->code());
- if (shared->code()->has_debug_break_slots()) {
- // if the code is already recompiled to have break slots skip
- // recompilation.
- ASSERT(!function->code()->has_debug_break_slots());
- } else {
+ if (!shared->code()->has_debug_break_slots()) {
// Try to compile the full code with debug break slots. If it
// fails just keep the current code.
- ASSERT(shared->code() == *current_code);
+ Handle<Code> current_code(function->shared()->code());
ZoneScope zone_scope(isolate_, DELETE_ON_EXIT);
shared->set_code(*lazy_compile);
bool prev_force_debugger_active =
isolate_->debugger()->force_debugger_active();
isolate_->debugger()->set_force_debugger_active(true);
+ ASSERT(current_code->kind() == Code::FUNCTION);
CompileFullCodeForDebugging(shared, current_code);
isolate_->debugger()->set_force_debugger_active(
prev_force_debugger_active);
@@ -1883,10 +1880,13 @@ void Debug::PrepareForBreakPoints() {
// If the current frame is for this function in its
// non-optimized form rewrite the return address to continue
// in the newly compiled full code with debug break slots.
- if (frame->function()->IsJSFunction() &&
- frame->function() == *function &&
- frame->LookupCode()->kind() == Code::FUNCTION) {
- intptr_t delta = frame->pc() - current_code->instruction_start();
+ if (!frame->is_optimized() &&
+ frame->function()->IsJSFunction() &&
+ frame->function() == *function) {
+ ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+ Handle<Code> frame_code(frame->LookupCode());
+ if (frame_code->has_debug_break_slots()) continue;
+ intptr_t delta = frame->pc() - frame_code->instruction_start();
int debug_break_slot_count = 0;
int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT);
for (RelocIterator it(*new_code, mask); !it.done(); it.next()) {
@@ -1915,11 +1915,11 @@ void Debug::PrepareForBreakPoints() {
"for debugging, "
"changing pc from %08" V8PRIxPTR " to %08" V8PRIxPTR "\n",
reinterpret_cast<intptr_t>(
- current_code->instruction_start()),
+ frame_code->instruction_start()),
reinterpret_cast<intptr_t>(
- current_code->instruction_start()) +
- current_code->instruction_size(),
- current_code->instruction_size(),
+ frame_code->instruction_start()) +
+ frame_code->instruction_size(),
+ frame_code->instruction_size(),
reinterpret_cast<intptr_t>(new_code->instruction_start()),
reinterpret_cast<intptr_t>(new_code->instruction_start()) +
new_code->instruction_size(),
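
The loop above repoints a live frame's pc from the old code into the freshly compiled code containing debug break slots, walking the new code's RelocInfo::DEBUG_BREAK_SLOT entries to see how far the instruction stream has shifted. Roughly, every break slot emitted before the mapped position pushes the target address back by the slot's length. A rough standalone sketch of that remapping, where a flat array of slot offsets is a hypothetical stand-in for RelocIterator and the slot length is illustrative rather than the per-architecture constant:

    #include <stddef.h>

    const size_t kDebugBreakSlotLength = 5;  // Illustrative only.

    // old_delta: pc - instruction_start() in the old code.
    // slot_offsets: offsets of debug break slots in the new code, ascending.
    size_t MapPcDelta(size_t old_delta,
                      const size_t* slot_offsets, int slot_count) {
      size_t new_delta = old_delta;
      for (int i = 0; i < slot_count; i++) {
        // Position this slot would have had in the old code: strip the
        // padding contributed by the i slots before it.
        size_t old_pos = slot_offsets[i] - i * kDebugBreakSlotLength;
        if (old_pos >= old_delta) break;  // Slot lies past the mapped pc.
        new_delta += kDebugBreakSlotLength;
      }
      return new_delta;
    }
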
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index ef55d54ab..fd2b6d248 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -134,6 +134,22 @@ class ElementsAccessorBase : public ElementsAccessor {
JSObject* obj,
Object* length);
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) {
+ return ElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+ array,
+ capacity,
+ length);
+ }
+
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ int capacity,
+ int length) {
+ UNIMPLEMENTED();
+ return obj;
+ }
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
@@ -376,11 +392,6 @@ class FastObjectElementsAccessor
return heap->true_value();
}
- protected:
- friend class FastElementsAccessor<FastObjectElementsAccessor,
- FixedArray,
- kPointerSize>;
-
static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
uint32_t capacity,
uint32_t length) {
@@ -393,6 +404,11 @@ class FastObjectElementsAccessor
set_capacity_mode);
}
+ protected:
+ friend class FastElementsAccessor<FastObjectElementsAccessor,
+ FixedArray,
+ kPointerSize>;
+
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
@@ -405,6 +421,12 @@ class FastDoubleElementsAccessor
: public FastElementsAccessor<FastDoubleElementsAccessor,
FixedDoubleArray,
kDoubleSize> {
+ static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
+ uint32_t capacity,
+ uint32_t length) {
+ return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
+ }
+
protected:
friend class ElementsAccessorBase<FastDoubleElementsAccessor,
FixedDoubleArray>;
@@ -412,12 +434,6 @@ class FastDoubleElementsAccessor
FixedDoubleArray,
kDoubleSize>;
- static MaybeObject* SetFastElementsCapacityAndLength(JSObject* obj,
- uint32_t capacity,
- uint32_t length) {
- return obj->SetFastDoubleElementsCapacityAndLength(capacity, length);
- }
-
virtual MaybeObject* Delete(JSObject* obj,
uint32_t key,
JSReceiver::DeleteMode mode) {
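
ElementsAccessorBase reaches SetFastElementsCapacityAndLength without a virtual call: the base template invokes a static method on its own template parameter (the curiously recurring template pattern), and each subclass simply shadows the base's UNIMPLEMENTED() default, which is what the relocated definitions above do for object and double arrays. A minimal compilable sketch of the dispatch:

    #include <cassert>
    #include <cstdio>

    template <class Subclass>
    class AccessorBase {
     public:
      int SetCapacityAndLength(int capacity, int length) {
        // Resolved at compile time against Subclass, not via a vtable.
        return Subclass::SetFastCapacityAndLength(capacity, length);
      }
      static int SetFastCapacityAndLength(int, int) {
        assert(false && "UNIMPLEMENTED for this elements kind");
        return -1;
      }
    };

    class FastAccessor : public AccessorBase<FastAccessor> {
     public:
      static int SetFastCapacityAndLength(int capacity, int length) {
        std::printf("grow to capacity=%d length=%d\n", capacity, length);
        return capacity;
      }
    };

    int main() {
      FastAccessor accessor;
      accessor.SetCapacityAndLength(16, 3);  // Hits FastAccessor's shadow.
      return 0;
    }
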
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index ed1ca5e58..a2a184d52 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -44,11 +44,24 @@ class ElementsAccessor {
JSObject* holder,
Object* receiver) = 0;
- // Modifies the length data property as specified for JSArrays and resizes
- // the underlying backing store accordingly.
+ // Modifies the length data property as specified for JSArrays and resizes the
+ // underlying backing store accordingly. The method honors the semantics of
+  // changing array sizes as defined in ECMAScript 5.1 15.4.5.2, i.e. arrays
+  // that have non-deletable elements can only be shrunk to the size of the
+  // highest non-deletable element.
virtual MaybeObject* SetLength(JSObject* holder,
Object* new_length) = 0;
+ // Modifies both the length and capacity of a JSArray, resizing the underlying
+ // backing store as necessary. This method does NOT honor the semantics of
+  // ECMAScript 5.1 15.4.5.2; arrays can be shrunk beyond non-deletable
+  // elements. This method should only be called for array expansion OR by
+  // runtime JavaScript code that uses InternalArrays and does not care about
+  // ECMAScript 5.1 semantics.
+ virtual MaybeObject* SetCapacityAndLength(JSArray* array,
+ int capacity,
+ int length) = 0;
+
virtual MaybeObject* Delete(JSObject* holder,
uint32_t key,
JSReceiver::DeleteMode mode) = 0;
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index f1042a4c6..c2976a577 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -926,28 +926,48 @@ Handle<JSArray> Factory::NewJSArray(int capacity,
}
-Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArray> elements,
+Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
PretenureFlag pretenure) {
Handle<JSArray> result =
Handle<JSArray>::cast(NewJSObject(isolate()->array_function(),
pretenure));
+ result->set_length(Smi::FromInt(0));
SetContent(result, elements);
return result;
}
+void Factory::SetElementsCapacityAndLength(Handle<JSArray> array,
+ int capacity,
+ int length) {
+ ElementsAccessor* accessor = array->GetElementsAccessor();
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ accessor->SetCapacityAndLength(*array, capacity, length));
+}
+
+
void Factory::SetContent(Handle<JSArray> array,
- Handle<FixedArray> elements) {
+ Handle<FixedArrayBase> elements) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
array->SetContent(*elements));
}
-void Factory::EnsureCanContainNonSmiElements(Handle<JSArray> array) {
+void Factory::EnsureCanContainHeapObjectElements(Handle<JSArray> array) {
+ CALL_HEAP_FUNCTION_VOID(
+ isolate(),
+ array->EnsureCanContainHeapObjectElements());
+}
+
+
+void Factory::EnsureCanContainElements(Handle<JSArray> array,
+ Handle<FixedArrayBase> elements,
+ EnsureElementsMode mode) {
CALL_HEAP_FUNCTION_VOID(
isolate(),
- array->EnsureCanContainNonSmiElements());
+ array->EnsureCanContainElements(*elements, mode));
}
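
Each of these Factory methods is a handle-safe shell over a raw Heap call: CALL_HEAP_FUNCTION_VOID evaluates the MaybeObject expression and, when the allocation reports a retryable failure, collects garbage and evaluates it again. A simplified model of the idiom, with hypothetical names (the real macro also knows which space failed and escalates before giving up):

    #include <cstdlib>

    struct MaybeObject {
      void* value;          // Non-null on success.
      bool retry_after_gc;  // Allocation failed; a GC may free space.
    };

    void* CallHeapFunction(MaybeObject (*raw_call)(),
                           void (*collect_garbage)()) {
      MaybeObject result = raw_call();
      if (result.value != 0) return result.value;
      if (result.retry_after_gc) {
        collect_garbage();
        result = raw_call();  // This sketch retries exactly once.
        if (result.value != 0) return result.value;
      }
      std::abort();  // Out of memory even after collecting.
    }
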
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index 0f028e5c5..e9a43fd4f 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -259,12 +259,19 @@ class Factory {
PretenureFlag pretenure = NOT_TENURED);
Handle<JSArray> NewJSArrayWithElements(
- Handle<FixedArray> elements,
+ Handle<FixedArrayBase> elements,
PretenureFlag pretenure = NOT_TENURED);
- void SetContent(Handle<JSArray> array, Handle<FixedArray> elements);
+ void SetElementsCapacityAndLength(Handle<JSArray> array,
+ int capacity,
+ int length);
- void EnsureCanContainNonSmiElements(Handle<JSArray> array);
+ void SetContent(Handle<JSArray> array, Handle<FixedArrayBase> elements);
+
+ void EnsureCanContainHeapObjectElements(Handle<JSArray> array);
+ void EnsureCanContainElements(Handle<JSArray> array,
+ Handle<FixedArrayBase> elements,
+ EnsureElementsMode mode);
Handle<JSProxy> NewJSProxy(Handle<Object> handler, Handle<Object> prototype);
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 9fd00422a..e3ed2de4e 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -723,12 +723,17 @@ void JavaScriptFrame::PrintTop(FILE* file,
JavaScriptFrame* frame = it.frame();
if (frame->IsConstructor()) PrintF(file, "new ");
// function name
- Object* fun = frame->function();
- if (fun->IsJSFunction()) {
- SharedFunctionInfo* shared = JSFunction::cast(fun)->shared();
- shared->DebugName()->ShortPrint(file);
+ Object* maybe_fun = frame->function();
+ if (maybe_fun->IsJSFunction()) {
+ JSFunction* fun = JSFunction::cast(maybe_fun);
+ fun->PrintName();
+ Code* js_code = frame->unchecked_code();
+ Address pc = frame->pc();
+ int code_offset =
+ static_cast<int>(pc - js_code->instruction_start());
+ PrintF("+%d", code_offset);
+ SharedFunctionInfo* shared = fun->shared();
if (print_line_number) {
- Address pc = frame->pc();
Code* code = Code::cast(
v8::internal::Isolate::Current()->heap()->FindCodeObject(pc));
int source_pos = code->SourcePosition(pc);
@@ -751,7 +756,7 @@ void JavaScriptFrame::PrintTop(FILE* file,
}
}
} else {
- fun->ShortPrint(file);
+ PrintF("<unknown>");
}
if (print_args) {
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index 8977cdb46..ef6e58ed0 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -125,7 +125,8 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map(map);
+ // String maps are all immortal immovable objects.
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(str.length());
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index f948c6c88..bc7550ed9 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -80,7 +80,7 @@ Heap::Heap()
#endif
reserved_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
max_semispace_size_(8 * Max(LUMP_OF_MEMORY, Page::kPageSize)),
- initial_semispace_size_(Max(LUMP_OF_MEMORY, Page::kPageSize)),
+ initial_semispace_size_(Page::kPageSize),
max_old_generation_size_(700ul * LUMP_OF_MEMORY),
max_executable_size_(128l * LUMP_OF_MEMORY),
@@ -1012,7 +1012,7 @@ void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
// Store Buffer overflowed while scanning promoted objects. These are not
// in any particular page, though they are likely to be clustered by the
// allocation routines.
- store_buffer_->HandleFullness();
+ store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize);
} else {
// Store Buffer overflowed while scanning a particular old space page for
// pointers to new space.
@@ -1813,7 +1813,7 @@ MaybeObject* Heap::AllocateMap(InstanceType instance_type,
}
Map* map = reinterpret_cast<Map*>(result);
- map->set_map_unsafe(meta_map());
+ map->set_map_no_write_barrier(meta_map());
map->set_instance_type(instance_type);
map->set_visitor_id(
StaticVisitorBase::GetVisitorId(instance_type, instance_size));
@@ -2173,7 +2173,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value, PretenureFlag pretenure) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -2191,7 +2191,7 @@ MaybeObject* Heap::AllocateHeapNumber(double value) {
{ MaybeObject* maybe_result = new_space_.AllocateRaw(HeapNumber::kSize);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap_number_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap_number_map());
HeapNumber::cast(result)->set_value(value);
return result;
}
@@ -2202,7 +2202,8 @@ MaybeObject* Heap::AllocateJSGlobalPropertyCell(Object* value) {
{ MaybeObject* maybe_result = AllocateRawCell();
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(global_property_cell_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(
+ global_property_cell_map());
JSGlobalPropertyCell::cast(result)->set_value(value);
return result;
}
@@ -2416,6 +2417,7 @@ bool Heap::CreateInitialObjects() {
}
set_code_stubs(NumberDictionary::cast(obj));
+
// Allocate the non_monomorphic_cache used in stub-cache.cc. The initial size
// is set to avoid expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = NumberDictionary::Allocate(64);
@@ -2543,7 +2545,7 @@ void StringSplitCache::Enter(Heap* heap,
}
}
}
- array->set_map(heap->fixed_cow_array_map());
+ array->set_map_no_write_barrier(heap->fixed_cow_array_map());
}
@@ -3139,7 +3141,8 @@ MaybeObject* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+ byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -3157,7 +3160,8 @@ MaybeObject* Heap::AllocateByteArray(int length) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ByteArray*>(result)->set_map_unsafe(byte_array_map());
+ reinterpret_cast<ByteArray*>(result)->set_map_no_write_barrier(
+ byte_array_map());
reinterpret_cast<ByteArray*>(result)->set_length(length);
return result;
}
@@ -3167,11 +3171,11 @@ void Heap::CreateFillerObjectAt(Address addr, int size) {
if (size == 0) return;
HeapObject* filler = HeapObject::FromAddress(addr);
if (size == kPointerSize) {
- filler->set_map_unsafe(one_pointer_filler_map());
+ filler->set_map_no_write_barrier(one_pointer_filler_map());
} else if (size == 2 * kPointerSize) {
- filler->set_map_unsafe(two_pointer_filler_map());
+ filler->set_map_no_write_barrier(two_pointer_filler_map());
} else {
- filler->set_map_unsafe(free_space_map());
+ filler->set_map_no_write_barrier(free_space_map());
FreeSpace::cast(filler)->set_size(size);
}
}
@@ -3189,7 +3193,7 @@ MaybeObject* Heap::AllocateExternalArray(int length,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<ExternalArray*>(result)->set_map_unsafe(
+ reinterpret_cast<ExternalArray*>(result)->set_map_no_write_barrier(
MapForExternalArrayType(array_type));
reinterpret_cast<ExternalArray*>(result)->set_length(length);
reinterpret_cast<ExternalArray*>(result)->set_external_pointer(
@@ -3226,7 +3230,7 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
if (!maybe_result->ToObject(&result)) return maybe_result;
// Initialize the object
- HeapObject::cast(result)->set_map_unsafe(code_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(code_map());
Code* code = Code::cast(result);
ASSERT(!isolate_->code_range()->exists() ||
isolate_->code_range()->contains(code->address()));
@@ -3355,7 +3359,7 @@ MaybeObject* Heap::Allocate(Map* map, AllocationSpace space) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// No need for write barrier since object is white and map is in old space.
- HeapObject::cast(result)->set_map_unsafe(map);
+ HeapObject::cast(result)->set_map_no_write_barrier(map);
return result;
}
@@ -4084,7 +4088,7 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map_unsafe(map);
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(map);
// Set length and hash fields of the allocated string.
String* answer = String::cast(result);
answer->set_length(chars);
@@ -4128,7 +4132,7 @@ MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_unsafe(ascii_string_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(ascii_string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4163,7 +4167,7 @@ MaybeObject* Heap::AllocateRawTwoByteString(int length,
}
// Partially initialize the object.
- HeapObject::cast(result)->set_map_unsafe(string_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(string_map());
String::cast(result)->set_length(length);
String::cast(result)->set_hash_field(String::kEmptyHashField);
ASSERT_EQ(size, HeapObject::cast(result)->Size());
@@ -4179,7 +4183,8 @@ MaybeObject* Heap::AllocateEmptyFixedArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedArray*>(result)->set_map_unsafe(fixed_array_map());
+ reinterpret_cast<FixedArray*>(result)->set_map_no_write_barrier(
+ fixed_array_map());
reinterpret_cast<FixedArray*>(result)->set_length(0);
return result;
}
@@ -4208,13 +4213,13 @@ MaybeObject* Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
}
if (InNewSpace(obj)) {
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_unsafe(map);
+ dst->set_map_no_write_barrier(map);
CopyBlock(dst->address() + kPointerSize,
src->address() + kPointerSize,
FixedArray::SizeFor(len) - kPointerSize);
return obj;
}
- HeapObject::cast(obj)->set_map_unsafe(map);
+ HeapObject::cast(obj)->set_map_no_write_barrier(map);
FixedArray* result = FixedArray::cast(obj);
result->set_length(len);
@@ -4234,7 +4239,7 @@ MaybeObject* Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
HeapObject* dst = HeapObject::cast(obj);
- dst->set_map_unsafe(map);
+ dst->set_map_no_write_barrier(map);
CopyBlock(
dst->address() + FixedDoubleArray::kLengthOffset,
src->address() + FixedDoubleArray::kLengthOffset,
@@ -4252,7 +4257,7 @@ MaybeObject* Heap::AllocateFixedArray(int length) {
}
// Initialize header.
FixedArray* array = reinterpret_cast<FixedArray*>(result);
- array->set_map_unsafe(fixed_array_map());
+ array->set_map_no_write_barrier(fixed_array_map());
array->set_length(length);
// Initialize body.
ASSERT(!InNewSpace(undefined_value()));
@@ -4300,7 +4305,7 @@ MUST_USE_RESULT static MaybeObject* AllocateFixedArrayWithFiller(
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- HeapObject::cast(result)->set_map_unsafe(heap->fixed_array_map());
+ HeapObject::cast(result)->set_map_no_write_barrier(heap->fixed_array_map());
FixedArray* array = FixedArray::cast(result);
array->set_length(length);
MemsetPointer(array->data_start(), filler, length);
@@ -4333,7 +4338,8 @@ MaybeObject* Heap::AllocateUninitializedFixedArray(int length) {
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedArray*>(obj)->set_map_unsafe(fixed_array_map());
+ reinterpret_cast<FixedArray*>(obj)->set_map_no_write_barrier(
+ fixed_array_map());
FixedArray::cast(obj)->set_length(length);
return obj;
}
@@ -4347,7 +4353,7 @@ MaybeObject* Heap::AllocateEmptyFixedDoubleArray() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Initialize the object.
- reinterpret_cast<FixedDoubleArray*>(result)->set_map_unsafe(
+ reinterpret_cast<FixedDoubleArray*>(result)->set_map_no_write_barrier(
fixed_double_array_map());
reinterpret_cast<FixedDoubleArray*>(result)->set_length(0);
return result;
@@ -4364,7 +4370,7 @@ MaybeObject* Heap::AllocateUninitializedFixedDoubleArray(
if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- reinterpret_cast<FixedDoubleArray*>(obj)->set_map_unsafe(
+ reinterpret_cast<FixedDoubleArray*>(obj)->set_map_no_write_barrier(
fixed_double_array_map());
FixedDoubleArray::cast(obj)->set_length(length);
return obj;
@@ -4401,7 +4407,8 @@ MaybeObject* Heap::AllocateHashTable(int length, PretenureFlag pretenure) {
{ MaybeObject* maybe_result = AllocateFixedArray(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- reinterpret_cast<HeapObject*>(result)->set_map_unsafe(hash_table_map());
+ reinterpret_cast<HeapObject*>(result)->set_map_no_write_barrier(
+ hash_table_map());
ASSERT(result->IsHashTable());
return result;
}
@@ -4414,7 +4421,7 @@ MaybeObject* Heap::AllocateGlobalContext() {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(global_context_map());
+ context->set_map_no_write_barrier(global_context_map());
ASSERT(context->IsGlobalContext());
ASSERT(result->IsContext());
return result;
@@ -4428,7 +4435,7 @@ MaybeObject* Heap::AllocateFunctionContext(int length, JSFunction* function) {
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(function_context_map());
+ context->set_map_no_write_barrier(function_context_map());
context->set_closure(function);
context->set_previous(function->context());
context->set_extension(NULL);
@@ -4448,7 +4455,7 @@ MaybeObject* Heap::AllocateCatchContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(catch_context_map());
+ context->set_map_no_write_barrier(catch_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(name);
@@ -4466,7 +4473,7 @@ MaybeObject* Heap::AllocateWithContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(with_context_map());
+ context->set_map_no_write_barrier(with_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(extension);
@@ -4484,7 +4491,7 @@ MaybeObject* Heap::AllocateBlockContext(JSFunction* function,
if (!maybe_result->ToObject(&result)) return maybe_result;
}
Context* context = reinterpret_cast<Context*>(result);
- context->set_map_unsafe(block_context_map());
+ context->set_map_no_write_barrier(block_context_map());
context->set_closure(function);
context->set_previous(previous);
context->set_extension(scope_info);
@@ -4497,7 +4504,7 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
FixedArray* scope_info;
MaybeObject* maybe_scope_info = AllocateFixedArray(length, TENURED);
if (!maybe_scope_info->To(&scope_info)) return maybe_scope_info;
- scope_info->set_map_unsafe(scope_info_map());
+ scope_info->set_map_no_write_barrier(scope_info_map());
return scope_info;
}
@@ -4541,8 +4548,10 @@ void Heap::EnsureHeapIsIterable() {
bool Heap::IdleNotification(int hint) {
- if (!FLAG_incremental_marking || FLAG_expose_gc || Serializer::enabled()) {
- return hint < 1000 ? true : IdleGlobalGC();
+ if (hint >= 1000) return IdleGlobalGC();
+ if (contexts_disposed_ > 0 || !FLAG_incremental_marking ||
+ FLAG_expose_gc || Serializer::enabled()) {
+ return true;
}
// By doing small chunks of GC work in each IdleNotification,
@@ -5150,29 +5159,29 @@ void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointer(reinterpret_cast<Object**>(&roots_[kSymbolTableRootIndex]));
- v->Synchronize("symbol_table");
+ v->Synchronize(VisitorSynchronization::kSymbolTable);
if (mode != VISIT_ALL_IN_SCAVENGE &&
mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
// Scavenge collections have special processing for this.
external_string_table_.Iterate(v);
}
- v->Synchronize("external_string_table");
+ v->Synchronize(VisitorSynchronization::kExternalStringsTable);
}
void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
- v->Synchronize("strong_root_list");
+ v->Synchronize(VisitorSynchronization::kStrongRootList);
v->VisitPointer(BitCast<Object**>(&hidden_symbol_));
- v->Synchronize("symbol");
+ v->Synchronize(VisitorSynchronization::kSymbol);
isolate_->bootstrapper()->Iterate(v);
- v->Synchronize("bootstrapper");
+ v->Synchronize(VisitorSynchronization::kBootstrapper);
isolate_->Iterate(v);
- v->Synchronize("top");
+ v->Synchronize(VisitorSynchronization::kTop);
Relocatable::Iterate(v);
- v->Synchronize("relocatable");
+ v->Synchronize(VisitorSynchronization::kRelocatable);
#ifdef ENABLE_DEBUGGER_SUPPORT
isolate_->debug()->Iterate(v);
@@ -5180,13 +5189,13 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
isolate_->deoptimizer_data()->Iterate(v);
}
#endif
- v->Synchronize("debug");
+ v->Synchronize(VisitorSynchronization::kDebug);
isolate_->compilation_cache()->Iterate(v);
- v->Synchronize("compilationcache");
+ v->Synchronize(VisitorSynchronization::kCompilationCache);
// Iterate over local handles in handle scopes.
isolate_->handle_scope_implementer()->Iterate(v);
- v->Synchronize("handlescope");
+ v->Synchronize(VisitorSynchronization::kHandleScope);
// Iterate over the builtin code objects and code stubs in the
// heap. Note that it is not necessary to iterate over code objects
@@ -5194,7 +5203,7 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
if (mode != VISIT_ALL_IN_SCAVENGE) {
isolate_->builtins()->IterateBuiltins(v);
}
- v->Synchronize("builtins");
+ v->Synchronize(VisitorSynchronization::kBuiltins);
// Iterate over global handles.
switch (mode) {
@@ -5209,11 +5218,11 @@ void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
isolate_->global_handles()->IterateAllRoots(v);
break;
}
- v->Synchronize("globalhandles");
+ v->Synchronize(VisitorSynchronization::kGlobalHandles);
// Iterate over pointers being held by inactive threads.
isolate_->thread_manager()->Iterate(v);
- v->Synchronize("threadmanager");
+ v->Synchronize(VisitorSynchronization::kThreadManager);
// Iterate over the pointers the Serialization/Deserialization code is
// holding.
@@ -5413,7 +5422,7 @@ class HeapDebugUtils {
Address map_addr = map_p->address();
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
MarkObjectRecursively(&map);
@@ -5460,7 +5469,7 @@ class HeapDebugUtils {
HeapObject* map_p = HeapObject::FromAddress(map_addr);
- obj->set_map(reinterpret_cast<Map*>(map_p));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
@@ -6172,7 +6181,7 @@ void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
Address map_addr = map_p->address();
- obj->set_map(reinterpret_cast<Map*>(map_addr + kMarkTag));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
// Scan the object body.
if (is_global_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
@@ -6214,7 +6223,7 @@ void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
HeapObject* map_p = HeapObject::FromAddress(map_addr);
- obj->set_map(reinterpret_cast<Map*>(map_p));
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
UnmarkRecursively(reinterpret_cast<Object**>(&map_p), unmark_visitor);
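
The rename from set_map_unsafe to set_map_no_write_barrier makes the invariant at these call sites explicit: a generational collector has to record old-to-new pointers in a store buffer so a scavenge can find them, but the maps stored here (string maps, filler maps, fixed-array maps, and so on) are immortal, immovable bootstrap objects, so the barrier would never have anything to record. A conceptual sketch, not V8's store-buffer API:

    #include <set>

    struct HeapObject;
    static std::set<HeapObject**> remembered_set;  // Stand-in store buffer.
    static bool InNewSpace(HeapObject*) { return false; }  // Heap-provided.

    void set_map(HeapObject** slot, HeapObject* map) {
      *slot = map;
      if (InNewSpace(map)) remembered_set.insert(slot);  // Write barrier.
    }

    void set_map_no_write_barrier(HeapObject** slot, HeapObject* map) {
      // Caller guarantees `map` is immortal and immovable (e.g. a root
      // map), so no remembered-set entry can ever be required.
      *slot = map;
    }
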
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 741e3d977..d92a4fb14 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -245,6 +245,7 @@ inline Heap* _inline_get_heap_();
V(use_strict, "use strict") \
V(dot_symbol, ".") \
V(anonymous_function_symbol, "(anonymous function)") \
+ V(compare_ic_symbol, ".compare_ic") \
V(infinity_symbol, "Infinity") \
V(minus_infinity_symbol, "-Infinity")
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index 32c3abfe7..31d7d153f 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -1227,10 +1227,7 @@ void HConstant::PrintDataTo(StringStream* stream) {
bool HArrayLiteral::IsCopyOnWrite() const {
- Handle<FixedArray> constant_elements = this->constant_elements();
- FixedArrayBase* constant_elements_values =
- FixedArrayBase::cast(constant_elements->get(1));
- return constant_elements_values->map() == HEAP->fixed_cow_array_map();
+ return boilerplate_object_->elements()->map() == HEAP->fixed_cow_array_map();
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 52fed8844..d7c0eb059 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -3447,8 +3447,21 @@ class HStoreGlobalGeneric: public HTemplateInstruction<3> {
class HLoadContextSlot: public HUnaryOperation {
public:
- HLoadContextSlot(HValue* context , int slot_index)
- : HUnaryOperation(context), slot_index_(slot_index) {
+ enum Mode {
+ // Perform a normal load of the context slot without checking its value.
+ kLoad,
+    // Load and check the value of the context slot. Deoptimize if it is the
+    // hole value. This is used to check for loads of uninitialized harmony
+    // bindings, where we deoptimize into full-codegen-generated code that
+    // subsequently throws a reference error.
+ kLoadCheck
+ };
+
+ HLoadContextSlot(HValue* context, Variable* var)
+ : HUnaryOperation(context), slot_index_(var->index()) {
+ ASSERT(var->IsContextSlot());
+ mode_ = (var->mode() == LET || var->mode() == CONST_HARMONY)
+ ? kLoadCheck : kLoad;
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetFlag(kDependsOnContextSlots);
@@ -3456,6 +3469,10 @@ class HLoadContextSlot: public HUnaryOperation {
int slot_index() const { return slot_index_; }
+ bool RequiresHoleCheck() {
+ return mode_ == kLoadCheck;
+ }
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3472,13 +3489,25 @@ class HLoadContextSlot: public HUnaryOperation {
private:
int slot_index_;
+ Mode mode_;
};
class HStoreContextSlot: public HTemplateInstruction<2> {
public:
- HStoreContextSlot(HValue* context, int slot_index, HValue* value)
- : slot_index_(slot_index) {
+ enum Mode {
+ // Perform a normal store to the context slot without checking its previous
+ // value.
+ kAssign,
+    // Check the previous value of the context slot and deoptimize if it is
+    // the hole value. This is used to check for assignments to uninitialized
+    // harmony bindings, where we deoptimize into full-codegen-generated code
+    // that subsequently throws a reference error.
+ kAssignCheck
+ };
+
+ HStoreContextSlot(HValue* context, int slot_index, Mode mode, HValue* value)
+ : slot_index_(slot_index), mode_(mode) {
SetOperandAt(0, context);
SetOperandAt(1, value);
SetFlag(kChangesContextSlots);
@@ -3487,11 +3516,16 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
int slot_index() const { return slot_index_; }
+ Mode mode() const { return mode_; }
bool NeedsWriteBarrier() {
return StoringValueNeedsWriteBarrier(value());
}
+ bool RequiresHoleCheck() {
+ return mode_ == kAssignCheck;
+ }
+
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
@@ -3502,6 +3536,7 @@ class HStoreContextSlot: public HTemplateInstruction<2> {
private:
int slot_index_;
+ Mode mode_;
};
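
Both context-slot instructions now carry a mode so later passes can ask RequiresHoleCheck(). The selection rule, repeated at each HStoreContextSlot construction site in hydrogen.cc below, is that initializing stores may overwrite the hole, while ordinary assignments to let or harmony-const bindings must first prove the slot is initialized and deoptimize otherwise. Distilled into a standalone helper (hypothetical; the real builder inlines this choice):

    enum VariableMode { VAR, CONST, LET, CONST_HARMONY };
    enum StoreMode { kAssign, kAssignCheck };

    StoreMode StoreModeFor(VariableMode mode, bool is_initializing_store) {
      // INIT_VAR / INIT_LET / INIT_CONST_HARMONY stores write the first
      // value over the hole, so they never need the check.
      if (is_initializing_store) return kAssign;
      // Plain assignments to harmony bindings check for the hole and
      // deoptimize into code that throws a ReferenceError.
      return (mode == LET || mode == CONST_HARMONY) ? kAssignCheck : kAssign;
    }
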
@@ -4167,18 +4202,21 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
class HArrayLiteral: public HMaterializedLiteral<1> {
public:
HArrayLiteral(HValue* context,
- Handle<FixedArray> constant_elements,
+ Handle<JSObject> boilerplate_object,
int length,
int literal_index,
int depth)
: HMaterializedLiteral<1>(literal_index, depth),
length_(length),
- constant_elements_(constant_elements) {
+ boilerplate_object_(boilerplate_object) {
SetOperandAt(0, context);
}
HValue* context() { return OperandAt(0); }
- Handle<FixedArray> constant_elements() const { return constant_elements_; }
+ ElementsKind boilerplate_elements_kind() const {
+ return boilerplate_object_->GetElementsKind();
+ }
+ Handle<JSObject> boilerplate_object() const { return boilerplate_object_; }
int length() const { return length_; }
bool IsCopyOnWrite() const;
@@ -4192,7 +4230,7 @@ class HArrayLiteral: public HMaterializedLiteral<1> {
private:
int length_;
- Handle<FixedArray> constant_elements_;
+ Handle<JSObject> boilerplate_object_;
};
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index 5c0703bc3..36632374e 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -2756,10 +2756,13 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
}
// 2. Build all the tests, with dangling true branches
+ int default_id = AstNode::kNoNumber;
for (int i = 0; i < clause_count; ++i) {
CaseClause* clause = clauses->at(i);
- if (clause->is_default()) continue;
-
+ if (clause->is_default()) {
+ default_id = clause->EntryId();
+ continue;
+ }
if (switch_type == SMI_SWITCH) {
clause->RecordTypeFeedback(oracle());
}
@@ -2806,7 +2809,10 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
HBasicBlock* last_block = current_block();
if (not_string_block != NULL) {
- last_block = CreateJoin(last_block, not_string_block, stmt->ExitId());
+ int join_id = (default_id != AstNode::kNoNumber)
+ ? default_id
+ : stmt->ExitId();
+ last_block = CreateJoin(last_block, not_string_block, join_id);
}
// 3. Loop over the clauses and the linked list of tests in lockstep,
@@ -3222,11 +3228,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
Variable* variable = expr->var();
- if (variable->mode() == LET) {
- return Bailout("reference to let variable");
- }
switch (variable->location()) {
case Variable::UNALLOCATED: {
+ if (variable->mode() == LET || variable->mode() == CONST_HARMONY) {
+ return Bailout("reference to global harmony declared variable");
+ }
// Handle known global constants like 'undefined' specially to avoid a
// load from a global cell for them.
Handle<Object> constant_value =
@@ -3269,9 +3275,11 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
HValue* value = environment()->Lookup(variable);
- if (variable->mode() == CONST &&
- value == graph()->GetConstantHole()) {
- return Bailout("reference to uninitialized const variable");
+ if (value == graph()->GetConstantHole()) {
+ ASSERT(variable->mode() == CONST ||
+ variable->mode() == CONST_HARMONY ||
+ variable->mode() == LET);
+ return Bailout("reference to uninitialized variable");
}
return ast_context()->ReturnValue(value);
}
@@ -3281,8 +3289,7 @@ void HGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
return Bailout("reference to const context slot");
}
HValue* context = BuildContextChainWalk(variable);
- HLoadContextSlot* instr =
- new(zone()) HLoadContextSlot(context, variable->index());
+ HLoadContextSlot* instr = new(zone()) HLoadContextSlot(context, variable);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -3325,13 +3332,13 @@ static bool IsFastObjectLiteral(Handle<JSObject> boilerplate,
int* total_size) {
if (max_depth <= 0) return false;
- FixedArrayBase* elements = boilerplate->elements();
+ Handle<FixedArrayBase> elements(boilerplate->elements());
if (elements->length() > 0 &&
elements->map() != HEAP->fixed_cow_array_map()) {
return false;
}
- FixedArray* properties = boilerplate->properties();
+ Handle<FixedArray> properties(boilerplate->properties());
if (properties->length() > 0) {
return false;
} else {
@@ -3457,11 +3464,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
int length = subexprs->length();
HValue* context = environment()->LookupContext();
- HArrayLiteral* literal = new(zone()) HArrayLiteral(context,
- expr->constant_elements(),
- length,
- expr->literal_index(),
- expr->depth());
+ Handle<FixedArray> literals(environment()->closure()->literals());
+ Handle<Object> raw_boilerplate(literals->get(expr->literal_index()));
+
+  // For now, the absence of a boilerplate object causes a deopt.
+ if (raw_boilerplate->IsUndefined()) {
+ AddInstruction(new(zone()) HSoftDeoptimize);
+ return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+ }
+
+ Handle<JSObject> boilerplate(Handle<JSObject>::cast(raw_boilerplate));
+ ElementsKind boilerplate_elements_kind = boilerplate->GetElementsKind();
+
+ HArrayLiteral* literal = new(zone()) HArrayLiteral(
+ context,
+ boilerplate,
+ length,
+ expr->literal_index(),
+ expr->depth());
+
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
@@ -3484,42 +3505,25 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
HValue* key = AddInstruction(
new(zone()) HConstant(Handle<Object>(Smi::FromInt(i)),
Representation::Integer32()));
- HInstruction* elements_kind =
- AddInstruction(new(zone()) HElementsKind(literal));
- HBasicBlock* store_fast = graph()->CreateBasicBlock();
- // Two empty blocks to satisfy edge split form.
- HBasicBlock* store_fast_edgesplit1 = graph()->CreateBasicBlock();
- HBasicBlock* store_fast_edgesplit2 = graph()->CreateBasicBlock();
- HBasicBlock* store_generic = graph()->CreateBasicBlock();
- HBasicBlock* check_smi_only_elements = graph()->CreateBasicBlock();
- HBasicBlock* join = graph()->CreateBasicBlock();
-
- HIsSmiAndBranch* smicheck = new(zone()) HIsSmiAndBranch(value);
- smicheck->SetSuccessorAt(0, store_fast_edgesplit1);
- smicheck->SetSuccessorAt(1, check_smi_only_elements);
- current_block()->Finish(smicheck);
- store_fast_edgesplit1->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(check_smi_only_elements);
- HCompareConstantEqAndBranch* smi_elements_check =
- new(zone()) HCompareConstantEqAndBranch(elements_kind,
- FAST_ELEMENTS,
- Token::EQ_STRICT);
- smi_elements_check->SetSuccessorAt(0, store_fast_edgesplit2);
- smi_elements_check->SetSuccessorAt(1, store_generic);
- current_block()->Finish(smi_elements_check);
- store_fast_edgesplit2->Finish(new(zone()) HGoto(store_fast));
-
- set_current_block(store_fast);
- AddInstruction(new(zone()) HStoreKeyedFastElement(elements, key, value));
- store_fast->Goto(join);
-
- set_current_block(store_generic);
- AddInstruction(BuildStoreKeyedGeneric(literal, key, value));
- store_generic->Goto(join);
- join->SetJoinId(expr->id());
- set_current_block(join);
+ switch (boilerplate_elements_kind) {
+ case FAST_SMI_ONLY_ELEMENTS:
+ case FAST_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyedFastElement(
+ elements,
+ key,
+ value,
+ boilerplate_elements_kind));
+ break;
+ case FAST_DOUBLE_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
+ key,
+ value));
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
AddSimulate(expr->GetIdForElement(i));
}
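
With the boilerplate object in hand at graph-build time, the per-element store is picked once from its elements kind, replacing the runtime diamond (smi check plus elements-kind compare feeding fast and generic store blocks) deleted above. The shape of that compile-time decision as a standalone sketch, with abort() standing in for UNREACHABLE():

    #include <cstdlib>

    enum ElementsKind { FAST_SMI_ONLY_ELEMENTS, FAST_ELEMENTS,
                        FAST_DOUBLE_ELEMENTS, DICTIONARY_ELEMENTS };
    enum StoreKind { kStoreKeyedFastElement, kStoreKeyedFastDoubleElement };

    StoreKind StoreForBoilerplate(ElementsKind kind) {
      switch (kind) {
        case FAST_SMI_ONLY_ELEMENTS:
        case FAST_ELEMENTS:
          return kStoreKeyedFastElement;        // Tagged store.
        case FAST_DOUBLE_ELEMENTS:
          return kStoreKeyedFastDoubleElement;  // Unboxed double store.
        default:
          std::abort();  // Other kinds never reach this point.
      }
    }
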
@@ -3838,8 +3842,11 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
}
HValue* context = BuildContextChainWalk(var);
+ HStoreContextSlot::Mode mode =
+ (var->mode() == LET || var->mode() == CONST_HARMONY)
+ ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), Top());
+ new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -3959,8 +3966,10 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
// variables (e.g. initialization inside a loop).
HValue* old_value = environment()->Lookup(var);
AddInstruction(new HUseConst(old_value));
- } else if (var->mode() == LET) {
- return Bailout("unsupported assignment to let");
+ } else if (var->mode() == CONST_HARMONY) {
+ if (expr->op() != Token::INIT_CONST_HARMONY) {
+ return Bailout("non-initializer assignment to const");
+ }
}
if (proxy->IsArguments()) return Bailout("assignment to arguments");
@@ -3977,6 +3986,14 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
case Variable::PARAMETER:
case Variable::LOCAL: {
+ // Perform an initialization check for let declared variables
+ // or parameters.
+ if (var->mode() == LET && expr->op() == Token::ASSIGN) {
+ HValue* env_value = environment()->Lookup(var);
+ if (env_value == graph()->GetConstantHole()) {
+ return Bailout("assignment to let variable before initialization");
+ }
+ }
// We do not allow the arguments object to occur in a context where it
// may escape, but assignments to stack-allocated locals are
// permitted.
@@ -4004,8 +4021,18 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* context = BuildContextChainWalk(var);
- HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), Top());
+ HStoreContextSlot::Mode mode;
+ if (expr->op() == Token::ASSIGN) {
+ mode = (var->mode() == LET || var->mode() == CONST_HARMONY)
+ ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
+ } else {
+ ASSERT(expr->op() == Token::INIT_VAR ||
+ expr->op() == Token::INIT_LET ||
+ expr->op() == Token::INIT_CONST_HARMONY);
+ mode = HStoreContextSlot::kAssign;
+ }
+ HStoreContextSlot* instr = new(zone()) HStoreContextSlot(
+ context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -5614,8 +5641,11 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
}
HValue* context = BuildContextChainWalk(var);
+ HStoreContextSlot::Mode mode =
+ (var->mode() == LET || var->mode() == CONST_HARMONY)
+ ? HStoreContextSlot::kAssignCheck : HStoreContextSlot::kAssign;
HStoreContextSlot* instr =
- new(zone()) HStoreContextSlot(context, var->index(), after);
+ new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
AddSimulate(expr->AssignmentId());
@@ -6116,14 +6146,27 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
- AddInstruction(new(zone()) HCheckNonSmi(left));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
- AddInstruction(new(zone()) HCheckNonSmi(right));
- AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
- HCompareObjectEqAndBranch* result =
- new(zone()) HCompareObjectEqAndBranch(left, right);
- result->set_position(expr->position());
- return ast_context()->ReturnControl(result, expr->id());
+      // Can we get away with a map check instead of an instance type check?
+ Handle<Map> map = oracle()->GetCompareMap(expr);
+ if (!map.is_null()) {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(new(zone()) HCheckMap(left, map));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(new(zone()) HCheckMap(right, map));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ } else {
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(left));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckInstanceType::NewIsSpecObject(right));
+ HCompareObjectEqAndBranch* result =
+ new(zone()) HCompareObjectEqAndBranch(left, right);
+ result->set_position(expr->position());
+ return ast_context()->ReturnControl(result, expr->id());
+ }
}
default:
return Bailout("Unsupported non-primitive compare");
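
When the type-feedback oracle reports a single map shared by both operands (the KNOWN_OBJECTS state that the ICCompareStub changes earlier cache per map), the two instance-type guards collapse into cheaper map checks, and the equality itself is just a pointer compare. Roughly what the generated checks amount to at run time, with hypothetical Object/Map structs and Deopt() standing in for leaving optimized code:

    struct Map {};
    struct Object { Map* map; };

    bool Deopt() { return false; }  // Stand-in: bail out of optimized code.

    bool KnownMapEquals(Object* left, Object* right, Map* known_map) {
      if (left->map != known_map) return Deopt();   // HCheckMap(left, map)
      if (right->map != known_map) return Deopt();  // HCheckMap(right, map)
      return left == right;  // HCompareObjectEqAndBranch
    }
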
@@ -6188,28 +6231,27 @@ void HGraphBuilder::VisitDeclaration(Declaration* decl) {
void HGraphBuilder::HandleDeclaration(VariableProxy* proxy,
VariableMode mode,
FunctionLiteral* function) {
- if (mode == LET || mode == CONST_HARMONY) {
- return Bailout("unsupported harmony declaration");
- }
Variable* var = proxy->var();
+ bool binding_needs_init =
+ (mode == CONST || mode == CONST_HARMONY || mode == LET);
switch (var->location()) {
case Variable::UNALLOCATED:
return Bailout("unsupported global declaration");
case Variable::PARAMETER:
case Variable::LOCAL:
case Variable::CONTEXT:
- if (mode == CONST || function != NULL) {
+ if (binding_needs_init || function != NULL) {
HValue* value = NULL;
- if (mode == CONST) {
- value = graph()->GetConstantHole();
- } else {
+ if (function != NULL) {
VisitForValue(function);
value = Pop();
+ } else {
+ value = graph()->GetConstantHole();
}
if (var->IsContextSlot()) {
HValue* context = environment()->LookupContext();
- HStoreContextSlot* store =
- new HStoreContextSlot(context, var->index(), value);
+ HStoreContextSlot* store = new HStoreContextSlot(
+ context, var->index(), HStoreContextSlot::kAssign, value);
AddInstruction(store);
if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
} else {
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index fb625fb24..7a5a19164 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -388,8 +388,91 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
- while ((pc_offset() & (m - 1)) != 0) {
- nop();
+ int mask = m - 1;
+ int addr = pc_offset();
+ Nop((m - (addr & mask)) & mask);
+}
+
+
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
+void Assembler::Nop(int bytes) {
+ EnsureSpace ensure_space(this);
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ // Older CPUs that do not support SSE2 may not support multibyte NOP
+ // instructions.
+ for (; bytes > 0; bytes--) {
+ EMIT(0x90);
+ }
+ return;
+ }
+
+  // Multi-byte NOPs from http://support.amd.com/us/Processor_TechDocs/40546.pdf
+ while (bytes > 0) {
+ switch (bytes) {
+ case 2:
+ EMIT(0x66);
+ case 1:
+ EMIT(0x90);
+ return;
+ case 3:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0);
+ return;
+ case 4:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x40);
+ EMIT(0);
+ return;
+ case 6:
+ EMIT(0x66);
+ case 5:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x44);
+ EMIT(0);
+ EMIT(0);
+ return;
+ case 7:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x80);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ return;
+ default:
+ case 11:
+ EMIT(0x66);
+ bytes--;
+ case 10:
+ EMIT(0x66);
+ bytes--;
+ case 9:
+ EMIT(0x66);
+ bytes--;
+ case 8:
+ EMIT(0xf);
+ EMIT(0x1f);
+ EMIT(0x84);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ EMIT(0);
+ bytes -= 8;
+ }
}
}
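
Align no longer pads with a run of single-byte 0x90s when SSE2 is available: it computes the padding as (m - (addr & mask)) & mask and hands the count to Nop(), whose fall-through switch emits the longest recommended encodings first. Three bytes of padding become one 0F 1F 00 instruction, and fifteen bytes become an 11-byte prefixed form followed by a 4-byte form. A small worked check of the padding arithmetic:

    #include <cassert>

    // Bytes needed to align addr to a power-of-two boundary m, exactly as
    // in Assembler::Align above.
    int PaddingFor(int addr, int m) {
      assert((m & (m - 1)) == 0);  // m must be a power of two.
      int mask = m - 1;
      return (m - (addr & mask)) & mask;
    }

    int main() {
      assert(PaddingFor(13, 16) == 3);   // One 3-byte NOP: 0F 1F 00.
      assert(PaddingFor(16, 16) == 0);   // Already aligned: emit nothing.
      assert(PaddingFor(17, 16) == 15);  // 11-byte form, then 4-byte form.
      return 0;
    }
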
@@ -463,13 +546,6 @@ void Assembler::push(const Operand& src) {
}
-void Assembler::push(Handle<Object> handle) {
- EnsureSpace ensure_space(this);
- EMIT(0x68);
- emit(handle);
-}
-
-
void Assembler::pop(Register dst) {
ASSERT(reloc_info_writer.last_pc() != NULL);
EnsureSpace ensure_space(this);
@@ -1640,6 +1716,27 @@ void Assembler::fyl2x() {
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xD9);
+ EMIT(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ EMIT(0xDB);
+ EMIT(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -1953,6 +2050,16 @@ void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
}
+void Assembler::ucomisd(XMMRegister dst, const Operand& src) {
+ ASSERT(CpuFeatures::IsEnabled(SSE2));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x2E);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
ASSERT(CpuFeatures::IsEnabled(SSE4_1));
EnsureSpace ensure_space(this);
@@ -2158,6 +2265,19 @@ void Assembler::movd(const Operand& dst, XMMRegister src) {
}
+void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
+ EnsureSpace ensure_space(this);
+ EMIT(0x66);
+ EMIT(0x0F);
+ EMIT(0x3A);
+ EMIT(0x17);
+ emit_sse_operand(dst, src);
+ EMIT(imm8);
+}
+
+
void Assembler::pand(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index d798f818a..9ed46fc3d 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -659,6 +659,7 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m. m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to something that's optimal for a jump target for the platform.
void CodeTargetAlign();
@@ -673,7 +674,6 @@ class Assembler : public AssemblerBase {
void push_imm32(int32_t imm32);
void push(Register src);
void push(const Operand& src);
- void push(Handle<Object> handle);
void pop(Register dst);
void pop(const Operand& dst);
@@ -926,6 +926,9 @@ class Assembler : public AssemblerBase {
void fsin();
void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void fadd(int i);
void fsub(int i);
@@ -983,6 +986,7 @@ class Assembler : public AssemblerBase {
void andpd(XMMRegister dst, XMMRegister src);
void ucomisd(XMMRegister dst, XMMRegister src);
+ void ucomisd(XMMRegister dst, const Operand& src);
enum RoundingMode {
kRoundToNearest = 0x0,
@@ -1017,6 +1021,7 @@ class Assembler : public AssemblerBase {
void movss(XMMRegister dst, const Operand& src);
void movss(const Operand& dst, XMMRegister src);
void movss(XMMRegister dst, XMMRegister src);
+ void extractps(Register dst, XMMRegister src, byte imm8);
void pand(XMMRegister dst, XMMRegister src);
void pxor(XMMRegister dst, XMMRegister src);
@@ -1080,7 +1085,7 @@ class Assembler : public AssemblerBase {
// Get the number of bytes available in the buffer.
inline int available_space() const { return reloc_info_writer.pos() - pc_; }
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index e12e79af7..28a9b0fad 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -1238,37 +1238,42 @@ static void ArrayNativeCode(MacroAssembler* masm,
false,
&prepare_generic_code_call);
__ IncrementCounter(counters->array_function_native(), 1);
- __ mov(eax, ebx);
- __ pop(ebx);
- if (construct_call) {
- __ pop(edi);
- }
- __ push(eax);
- // eax: JSArray
+ __ push(ebx);
+ __ mov(ebx, Operand(esp, kPointerSize));
// ebx: argc
// edx: elements_array_end (untagged)
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
// Location of the last argument
- __ lea(edi, Operand(esp, 2 * kPointerSize));
+ int last_arg_offset = (construct_call ? 4 : 3) * kPointerSize;
+ __ lea(edi, Operand(esp, last_arg_offset));
// Location of the first array element (Parameter fill_with_holes to
- // AllocateJSArrayis false, so the FixedArray is returned in ecx).
+ // AllocateJSArray is false, so the FixedArray is returned in ecx).
__ lea(edx, Operand(ecx, FixedArray::kHeaderSize - kHeapObjectTag));
+ Label has_non_smi_element;
+
// ebx: argc
// edx: location of the first array element
// edi: location of the last argument
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
Label loop, entry;
__ mov(ecx, ebx);
__ jmp(&entry);
__ bind(&loop);
__ mov(eax, Operand(edi, ecx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(eax, &has_non_smi_element);
+ }
__ mov(Operand(edx, 0), eax);
__ add(edx, Immediate(kPointerSize));
__ bind(&entry);
@@ -1278,13 +1283,20 @@ static void ArrayNativeCode(MacroAssembler* masm,
// Remove caller arguments from the stack and return.
// ebx: argc
// esp[0]: JSArray
- // esp[4]: return address
- // esp[8]: last argument
+ // esp[4]: argc
+ // esp[8]: constructor (only if construct_call)
+ // esp[12]: return address
+ // esp[16]: last argument
+ __ mov(ecx, Operand(esp, last_arg_offset - kPointerSize));
+ __ pop(eax);
+ __ pop(ebx);
+ __ lea(esp, Operand(esp, ebx, times_pointer_size,
+ last_arg_offset - kPointerSize));
+ __ jmp(ecx);
+
+ __ bind(&has_non_smi_element);
+ // Throw away the array that's only been partially constructed.
__ pop(eax);
- __ pop(ecx);
- __ lea(esp, Operand(esp, ebx, times_pointer_size, 1 * kPointerSize));
- __ push(ecx);
- __ ret(0);
// Restore argc and constructor before running the generic code.
__ bind(&prepare_generic_code_call);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 68eebd3a0..eabf201d7 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -2938,157 +2938,263 @@ void FloatingPointHelper::CheckFloatOperandsAreInt32(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // edx = base
- // eax = exponent
- // ecx = temporary, result
-
CpuFeatures::Scope use_sse2(SSE2);
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ mov(edx, Operand(esp, 2 * kPointerSize));
- __ mov(eax, Operand(esp, 1 * kPointerSize));
-
- // Save 1 in xmm3 - we need this several times later on.
- __ mov(ecx, Immediate(1));
- __ cvtsi2sd(xmm3, ecx);
-
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(eax, &exponent_nonsmi);
- __ JumpIfNotSmi(edx, &base_nonsmi);
-
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&powi);
- // exponent is smi and base is a heapnumber.
- __ bind(&base_nonsmi);
Factory* factory = masm->isolate()->factory();
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
+ const Register exponent = eax;
+ const Register base = edx;
+ const Register scratch = ecx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
+
+ Label call_runtime, done, exponent_not_smi, int_exponent;
+
+ // Save 1 in double_result - we need this several times later on.
+ __ mov(scratch, Immediate(1));
+ __ cvtsi2sd(double_result, scratch);
+
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ mov(base, Operand(esp, 2 * kPointerSize));
+ __ mov(exponent, Operand(esp, 1 * kPointerSize));
+
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ cmp(FieldOperand(base, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+
+ __ movdbl(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ cvtsi2sd(double_base, base);
+
+ __ bind(&unpack_exponent);
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ cmp(FieldOperand(exponent, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(not_equal, &call_runtime);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ movdbl(double_exponent,
+ FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, Operand(double_exponent));
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmp(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead, so this check only runs
+ // for the ON_STACK case; non-constant +/-0.5 exponents hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ mov(scratch, Immediate(0x3F000000u));
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &not_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(&not_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, single-precision -Infinity has the highest
+ // 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000u);
+ __ movd(double_scratch, scratch);
+ __ cvtss2sd(double_scratch, double_scratch);
+ __ ucomisd(double_base, double_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiUntag(eax);
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ movdbl(Operand(esp, 0), double_exponent);
+ __ fld_d(Operand(esp, 0)); // E
+ __ movdbl(Operand(esp, 0), double_base);
+ __ fld_d(Operand(esp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ test_b(eax, 0x5F); // We check for all but precision exception.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&done);
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ mov(edx, eax);
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ add(esp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ __ mov(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmp(eax, 0);
- __ j(greater_equal, &no_neg, Label::kNear);
- __ neg(eax);
+ Label no_neg, while_true, no_multiply;
+ __ test(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ neg(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movsd(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shr(eax, 1);
+ __ shr(scratch, 1);
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
- // base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ test(edx, edx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ mov(ecx, Immediate(0x7FB00000));
- __ movd(xmm0, ecx);
- __ cvtss2sd(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
+ __ mulsd(double_scratch, double_scratch);
+ __ j(not_zero, &while_true);
- Label base_not_smi;
- Label handle_special_cases;
- __ JumpIfNotSmi(edx, &base_not_smi, Label::kNear);
- __ SmiUntag(edx);
- __ cvtsi2sd(xmm0, edx);
- __ jmp(&handle_special_cases, Label::kNear);
-
- __ bind(&base_not_smi);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(not_equal, &call_runtime);
- __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ and_(ecx, HeapNumber::kExponentMask);
- __ cmp(ecx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
+ // The exponent register still holds the original exponent value - if it
+ // is negative, return 1/result.
+ __ test(exponent, exponent);
+ __ j(positive, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result); // Result cannot be NaN.
+ // double_exponent (aliased as double_scratch2) has already been
+ // overwritten and may never have held the exponent value if the exponent
+ // was a smi. Reset it to the exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtsi2sd(double_exponent, exponent);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ mov(ecx, Immediate(0xBF000000));
- __ movd(xmm2, ecx);
- __ cvtss2sd(xmm2, xmm2);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movsd(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(ecx, eax, edx, &call_runtime);
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm1);
- __ mov(eax, ecx);
- __ ret(2 * kPointerSize);
+ // The stub is called from non-optimized code, which expects the result
+ // as a heap number in eax.
+ __ bind(&done);
+ __ AllocateHeapNumber(eax, scratch, base, &call_runtime);
+ __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(4, scratch);
+ __ movdbl(Operand(esp, 0 * kDoubleSize), double_base);
+ __ movdbl(Operand(esp, 1 * kDoubleSize), double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 4);
+ }
+ // Return value is in st(0) on ia32.
+ // Store it into the (fixed) result register.
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fstp_d(Operand(esp, 0));
+ __ movdbl(double_result, Operand(esp, 0));
+ __ add(esp, Immediate(kDoubleSize));
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
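The rewritten MathPowStub combines two classic techniques: square-and-multiply for integer exponents (the shr/mulsd loop above) and the 2^(E*log2 B) decomposition for the FPU fast path. A rough C++ sketch of both, assuming a positive finite base for the second:

#include <cmath>

// int_exponent path: square-and-multiply. Each shr shifts a bit into the
// carry flag (selecting the conditional multiply) and the base is squared
// once per round.
double PowInt(double base, int exponent) {
  unsigned bits = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                               : static_cast<unsigned>(exponent);
  double result = 1.0;
  double b = base;
  while (bits != 0) {
    if (bits & 1) result *= b;   // carry set: mulsd(double_result, ...)
    b *= b;                      // mulsd(double_scratch, double_scratch)
    bits >>= 1;                  // shr(scratch, 1)
  }
  if (exponent < 0) {
    result = 1.0 / result;
    // x^-y == (1/x)^y fails for subnormal results; the stub detects a zero
    // result here and bails out to the C runtime instead.
    if (result == 0.0) return std::pow(base, exponent);
  }
  return result;
}

// fast_power path: B^E = 2^(E*log2(B)), with X split into rnd(X) plus a
// fractional part so f2xm1 (which needs |arg| < 1) and fscale apply. Only
// valid for positive finite bases; the stub checks the FPU status word and
// bails out otherwise.
double PowViaExp2(double base, double exponent) {
  double x = exponent * std::log2(base);   // fyl2x
  double r = std::nearbyint(x);            // frndint
  return std::exp2(x - r) * std::exp2(r);  // f2xm1 (+1), then fscale
}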
@@ -4540,7 +4646,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// megamorphic.
__ cmp(ecx, Immediate(UninitializedSentinel(isolate)));
__ j(equal, &initialize, Label::kNear);
- // MegamorphicSentinel is a root so no write-barrier is needed.
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write-barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(MegamorphicSentinel(isolate)));
__ jmp(&call, Label::kNear);
@@ -4548,14 +4655,7 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// An uninitialized cache is patched with the function.
__ bind(&initialize);
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset), edi);
- __ mov(ecx, edi);
- __ RecordWriteField(ebx,
- JSGlobalPropertyCell::kValueOffset,
- ecx,
- edx,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET, // Cells are rescanned.
- OMIT_SMI_CHECK);
+ // No need for a write barrier here - cells are rescanned.
__ bind(&call);
}
@@ -4587,6 +4687,8 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
// non-function case.
__ mov(ebx, Operand(esp, 0));
__ mov(ebx, Operand(ebx, 1));
+ // MegamorphicSentinel is an immortal immovable object (undefined) so no
+ // write barrier is needed.
__ mov(FieldOperand(ebx, JSGlobalPropertyCell::kValueOffset),
Immediate(MegamorphicSentinel(isolate)));
}
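The write-barrier removals above rest on two GC facts: global property cells are always rescanned, and the megamorphic sentinel (undefined) is immortal and immovable. The cache itself is a small state machine; a simplified model, not the real heap objects:

// Simplified model of the CallFunctionStub cache cell.
enum class CallICState { kUninitialized, kMonomorphic, kMegamorphic };

struct CallCell {
  CallICState state = CallICState::kUninitialized;
  const void* target = nullptr;  // cached JSFunction

  void Observe(const void* fn) {
    switch (state) {
      case CallICState::kUninitialized:
        state = CallICState::kMonomorphic;
        target = fn;   // no write barrier: cells are rescanned by the GC
        break;
      case CallICState::kMonomorphic:
        if (target != fn) {
          state = CallICState::kMegamorphic;  // sentinel: immortal, immovable
          target = nullptr;
        }
        break;
      case CallICState::kMegamorphic:
        break;         // terminal state
    }
  }
};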
@@ -5991,20 +6093,23 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ JumpIfNotSmi(edx, &runtime);
__ sub(ecx, edx);
__ cmp(ecx, FieldOperand(eax, String::kLengthOffset));
- Label return_eax;
- __ j(equal, &return_eax);
+ Label not_original_string;
+ __ j(not_equal, &not_original_string, Label::kNear);
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
+ __ bind(&not_original_string);
// Special handling of sub-strings of length 1 and 2. One-character strings
// are handled in the runtime system (looked up in the single character
// cache). Two-character strings are looked up in the symbol cache.
- __ SmiUntag(ecx); // Result length is no longer smi.
- __ cmp(ecx, 2);
+ __ cmp(ecx, Immediate(Smi::FromInt(2)));
__ j(greater, &result_longer_than_two);
__ j(less, &runtime);
// Sub string of length 2 requested.
// eax: string
// ebx: instance type
- // ecx: sub string length (value is 2)
+ // ecx: sub string length (smi, value is 2)
// edx: from index (smi)
__ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &runtime);
@@ -6019,6 +6124,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
StringHelper::GenerateTwoCharacterSymbolTableProbe(
masm, ebx, ecx, eax, edx, edi,
&make_two_character_string, &make_two_character_string);
+ __ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
__ bind(&make_two_character_string);
@@ -6026,55 +6132,61 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 3 * kPointerSize));
__ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
__ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
- __ Set(ecx, Immediate(2));
+ __ Set(ecx, Immediate(Smi::FromInt(2)));
+ __ mov(edx, Operand(esp, 2 * kPointerSize)); // Load index.
+
+ __ bind(&result_longer_than_two);
+ // eax: string
+ // ebx: instance type
+ // ecx: sub string length (smi)
+ // edx: from index (smi)
+ // Deal with different string types: update the index if necessary
+ // and put the underlying string into edi.
+ Label underlying_unpacked, sliced_string, seq_or_external_string;
+ // If the string is not indirect, it can only be sequential or external.
+ STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+ STATIC_ASSERT(kIsIndirectStringMask != 0);
+ __ test(ebx, Immediate(kIsIndirectStringMask));
+ __ j(zero, &seq_or_external_string, Label::kNear);
+
+ Factory* factory = masm->isolate()->factory();
+ __ test(ebx, Immediate(kSlicedNotConsMask));
+ __ j(not_zero, &sliced_string, Label::kNear);
+ // Cons string. Check whether it is flat, then fetch first part.
+ // Flat cons strings have an empty second part.
+ __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
+ factory->empty_string());
+ __ j(not_equal, &runtime);
+ __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&sliced_string);
+ // Sliced string. Fetch parent and adjust start index by offset.
+ __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
+ __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
+ // Update instance type.
+ __ mov(ebx, FieldOperand(edi, HeapObject::kMapOffset));
+ __ movzx_b(ebx, FieldOperand(ebx, Map::kInstanceTypeOffset));
+ __ jmp(&underlying_unpacked, Label::kNear);
+
+ __ bind(&seq_or_external_string);
+ // Sequential or external string. Just move string to the expected register.
+ __ mov(edi, eax);
+
+ __ bind(&underlying_unpacked);
if (FLAG_string_slices) {
Label copy_routine;
- // If coming from the make_two_character_string path, the string
- // is too short to be sliced anyways.
- STATIC_ASSERT(2 < SlicedString::kMinLength);
- __ jmp(&copy_routine);
- __ bind(&result_longer_than_two);
-
- // eax: string
- // ebx: instance type
- // ecx: sub string length
- // edx: from index (smi)
- Label allocate_slice, sliced_string, seq_or_external_string;
- __ cmp(ecx, SlicedString::kMinLength);
- // Short slice. Copy instead of slicing.
- __ j(less, &copy_routine);
- // If the string is not indirect, it can only be sequential or external.
- STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
- STATIC_ASSERT(kIsIndirectStringMask != 0);
- __ test(ebx, Immediate(kIsIndirectStringMask));
- __ j(zero, &seq_or_external_string, Label::kNear);
-
- Factory* factory = masm->isolate()->factory();
- __ test(ebx, Immediate(kSlicedNotConsMask));
- __ j(not_zero, &sliced_string, Label::kNear);
- // Cons string. Check whether it is flat, then fetch first part.
- __ cmp(FieldOperand(eax, ConsString::kSecondOffset),
- factory->empty_string());
- __ j(not_equal, &runtime);
- __ mov(edi, FieldOperand(eax, ConsString::kFirstOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&sliced_string);
- // Sliced string. Fetch parent and correct start index by offset.
- __ add(edx, FieldOperand(eax, SlicedString::kOffsetOffset));
- __ mov(edi, FieldOperand(eax, SlicedString::kParentOffset));
- __ jmp(&allocate_slice, Label::kNear);
-
- __ bind(&seq_or_external_string);
- // Sequential or external string. Just move string to the correct register.
- __ mov(edi, eax);
-
- __ bind(&allocate_slice);
// edi: underlying subject string
// ebx: instance type of original subject string
- // edx: offset
- // ecx: length
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ __ cmp(ecx, Immediate(Smi::FromInt(SlicedString::kMinLength)));
+ // Short slice. Copy instead of slicing.
+ __ j(less, &copy_routine);
// Allocate new sliced string. At this point we do not reload the instance
// type including the string encoding because we simply rely on the info
// provided by the original string. It does not matter if the original
@@ -6091,27 +6203,50 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
__ bind(&set_slice_header);
__ mov(FieldOperand(eax, SlicedString::kOffsetOffset), edx);
- __ SmiTag(ecx);
__ mov(FieldOperand(eax, SlicedString::kLengthOffset), ecx);
__ mov(FieldOperand(eax, SlicedString::kParentOffset), edi);
__ mov(FieldOperand(eax, SlicedString::kHashFieldOffset),
Immediate(String::kEmptyHashField));
- __ jmp(&return_eax);
+ __ IncrementCounter(counters->sub_string_native(), 1);
+ __ ret(3 * kPointerSize);
__ bind(&copy_routine);
- } else {
- __ bind(&result_longer_than_two);
}
- // eax: string
- // ebx: instance type
- // ecx: result string length
- // Check for flat ascii string
- Label non_ascii_flat;
- __ JumpIfInstanceTypeIsNotSequentialAscii(ebx, ebx, &non_ascii_flat);
+ // edi: underlying subject string
+ // ebx: instance type of original subject string
+ // edx: adjusted start index (smi)
+ // ecx: length (smi)
+ // The subject string can only be external or sequential string of either
+ // encoding at this point.
+ Label two_byte_sequential, runtime_drop_two, sequential_string;
+ STATIC_ASSERT(kExternalStringTag != 0);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ test_b(ebx, kExternalStringTag);
+ __ j(zero, &sequential_string);
+
+ // Handle external string.
+ Label ascii_external, done;
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ test_b(ebx, kShortExternalStringMask);
+ __ j(not_zero, &runtime);
+ __ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+ __ bind(&sequential_string);
+ // Stash away (adjusted) index and (underlying) string.
+ __ push(edx);
+ __ push(edi);
+ __ SmiUntag(ecx);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
+ __ test_b(ebx, kStringEncodingMask);
+ __ j(zero, &two_byte_sequential);
- // Allocate the result.
- __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime);
+ // Sequential ascii string. Allocate the result.
+ __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6120,11 +6255,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edi, eax);
__ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
__ SmiUntag(ebx);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6133,20 +6267,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, true);
__ mov(esi, edx); // Restore esi.
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
- __ bind(&non_ascii_flat);
- // eax: string
- // ebx: instance type & kStringRepresentationMask | kStringEncodingMask
- // ecx: result string length
- // Check for flat two byte string
- __ cmp(ebx, kSeqStringTag | kTwoByteStringTag);
- __ j(not_equal, &runtime);
-
- // Allocate the result.
- __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime);
+ __ bind(&two_byte_sequential);
+ // Sequential two-byte string. Allocate the result.
+ __ AllocateTwoByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
// eax: result string
// ecx: result string length
@@ -6156,14 +6282,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ add(edi,
Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
- __ mov(esi, Operand(esp, 3 * kPointerSize));
- __ add(esi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ mov(ebx, Operand(esp, 2 * kPointerSize)); // from
+ __ pop(esi);
+ __ pop(ebx);
// Since from is a smi it is already multiplied by 2, which matches the
// size of a two-byte character.
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- __ add(esi, ebx);
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqTwoByteString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6172,11 +6297,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// esi: character of sub string start
StringHelper::GenerateCopyCharactersREP(masm, edi, esi, ecx, ebx, false);
__ mov(esi, edx); // Restore esi.
-
- __ bind(&return_eax);
__ IncrementCounter(counters->sub_string_native(), 1);
__ ret(3 * kPointerSize);
+ // Drop pushed values on the stack before tail call.
+ __ bind(&runtime_drop_two);
+ __ Drop(2);
+
// Just jump to runtime to create the sub string.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kSubString, 3, 1);
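The restructured SubStringStub first unwraps cons and sliced strings to the underlying sequential or external string, then chooses between copying and creating a SlicedString header. A schematic of that decision, with an assumed threshold standing in for SlicedString::kMinLength:

#include <cstddef>
#include <string>

// kMinSliceLength stands in for SlicedString::kMinLength; the exact
// threshold is an assumption here.
static const std::size_t kMinSliceLength = 13;

struct Slice { const std::string* parent; std::size_t offset, length; };

// Returns a slice descriptor for long substrings (set_slice_header) or
// materializes a copy for short ones (copy_routine). Cons/sliced inputs
// are assumed already unwrapped to `parent`, with `from` adjusted by the
// slice offset as in the sliced_string block above.
Slice MakeSubString(const std::string& parent, std::size_t from,
                    std::size_t to, std::string* copy_out) {
  std::size_t length = to - from;
  if (length < kMinSliceLength) {
    *copy_out = parent.substr(from, length);
    return Slice{nullptr, 0, 0};
  }
  return Slice{&parent, from, length};
}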
@@ -6568,33 +6695,45 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(ecx);
- __ push(edx);
- __ push(eax);
- __ push(ecx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &miss, Label::kNear);
+
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
+ __ cmp(ecx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ cmp(ebx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ sub(eax, edx);
+ __ ret(0);
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
// Call the runtime system in a fresh internal frame.
ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
- __ push(edx);
+ __ push(edx); // Preserve edx and eax.
+ __ push(eax);
+ __ push(edx); // And also use them as the arguments.
__ push(eax);
__ push(Immediate(Smi::FromInt(op_)));
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
+ __ pop(eax);
+ __ pop(edx);
}
- // Compute the entry point of the rewritten stub.
- __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(ecx);
- __ pop(eax);
- __ pop(edx);
- __ push(ecx);
-
// Do a tail call to the rewritten stub.
__ jmp(edi);
}
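GenerateKnownObjects adds a compare-IC fast path for the case where both operands are known to share one map. Roughly, in C++ with the ia32 tagging scheme simplified:

#include <cstdint>

struct HeapObjectRepr { const void* map; };

static bool IsSmiWord(uintptr_t tagged) { return (tagged & 1) == 0; }

static const HeapObjectRepr* Untag(uintptr_t tagged) {
  return reinterpret_cast<const HeapObjectRepr*>(tagged - 1);  // heap tag 1
}

// Mirrors the stub: and_(ecx, eax) + JumpIfSmi rules out smis, both maps
// must equal known_map_, and sub(eax, edx) yields zero exactly on equality.
bool KnownObjectsEqual(uintptr_t lhs, uintptr_t rhs,
                       const void* known_map, bool* miss) {
  *miss = false;
  if (IsSmiWord(lhs & rhs)) { *miss = true; return false; }
  if (Untag(lhs)->map != known_map || Untag(rhs)->map != known_map) {
    *miss = true;  // GenerateMiss re-patches the stub
    return false;
  }
  return lhs == rhs;
}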
diff --git a/deps/v8/src/ia32/debug-ia32.cc b/deps/v8/src/ia32/debug-ia32.cc
index 264956078..b37b54b8c 100644
--- a/deps/v8/src/ia32/debug-ia32.cc
+++ b/deps/v8/src/ia32/debug-ia32.cc
@@ -258,9 +258,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index eeee4f2b7..98c240079 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -231,8 +231,8 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -250,8 +250,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
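The deoptimizer patch now writes one 2-byte nop (0x66 0x90) instead of two 1-byte nops, so the patched site decodes as a single instruction. A sketch of the patch/revert pair, with an assumed helper signature:

#include <cstdint>

// Assumed helper signature; the real code patches two fixed bytes found
// relative to the call's target address, exactly as asserted above.
void PatchStackCheck(uint8_t* call_target_address, bool deoptimizing) {
  if (deoptimizing) {
    call_target_address[-3] = 0x66;  // 2 byte nop part 1
    call_target_address[-2] = 0x90;  // 2 byte nop part 2
  } else {
    call_target_address[-3] = 0x73;  // jae
    call_target_address[-2] = 0x07;  // offset
  }
  // call_target_address[-1] remains 0xe8 (call); only its target changes.
}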
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index da2239011..b5ddcca19 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -763,10 +763,13 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFC: mnem = "frndint"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -788,6 +791,8 @@ int DisassemblerIA32::RegisterFPUInstruction(int escape_opcode,
has_register = true;
} else if (modrm_byte == 0xE2) {
mnem = "fclex";
+ } else if (modrm_byte == 0xE3) {
+ mnem = "fninit";
} else {
UnimplementedInstruction();
}
@@ -987,7 +992,7 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x0F:
- { byte f0byte = *(data+1);
+ { byte f0byte = data[1];
const char* f0mnem = F0Mnem(f0byte);
if (f0byte == 0x18) {
int mod, regop, rm;
@@ -995,6 +1000,25 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
const char* suffix[] = {"nta", "1", "2", "3"};
AppendToBuffer("%s%s ", f0mnem, suffix[regop & 0x03]);
data += PrintRightOperand(data);
+ } else if (f0byte == 0x1F && data[2] == 0) {
+ AppendToBuffer("nop"); // 3 byte nop.
+ data += 3;
+ } else if (f0byte == 0x1F && data[2] == 0x40 && data[3] == 0) {
+ AppendToBuffer("nop"); // 4 byte nop.
+ data += 4;
+ } else if (f0byte == 0x1F && data[2] == 0x44 && data[3] == 0 &&
+ data[4] == 0) {
+ AppendToBuffer("nop"); // 5 byte nop.
+ data += 5;
+ } else if (f0byte == 0x1F && data[2] == 0x80 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0) {
+ AppendToBuffer("nop"); // 7 byte nop.
+ data += 7;
+ } else if (f0byte == 0x1F && data[2] == 0x84 && data[3] == 0 &&
+ data[4] == 0 && data[5] == 0 && data[6] == 0 &&
+ data[7] == 0) {
+ AppendToBuffer("nop"); // 8 byte nop.
+ data += 8;
} else if (f0byte == 0xA2 || f0byte == 0x31) {
AppendToBuffer("%s", f0mnem);
data += 2;
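The byte patterns these new decoder arms match are the standard 0F 1F multi-byte nop forms, plus the 0x66-prefixed variants handled in the following hunks; collected here for reference:

#include <cstdint>

static const uint8_t kNop2[] = {0x66, 0x90};
static const uint8_t kNop3[] = {0x0F, 0x1F, 0x00};
static const uint8_t kNop4[] = {0x0F, 0x1F, 0x40, 0x00};
static const uint8_t kNop5[] = {0x0F, 0x1F, 0x44, 0x00, 0x00};
static const uint8_t kNop7[] = {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00};
static const uint8_t kNop8[] = {0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00};
// A 6-byte form is a 0x66 prefix on kNop5, which the prefix-skipping loop
// in the 0x66 case below also decodes as "nop".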
@@ -1130,8 +1154,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
break;
case 0x66: // prefix
- data++;
- if (*data == 0x8B) {
+ while (*data == 0x66) data++;
+ if (*data == 0xf && data[1] == 0x1f) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x90) {
+ AppendToBuffer("nop"); // 0x66 prefix
+ } else if (*data == 0x8B) {
data++;
data += PrintOperands("mov_w", REG_OPER_OP_ORDER, data);
} else if (*data == 0x89) {
@@ -1185,6 +1213,16 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x17) {
+ data++;
+ int mod, regop, rm;
+ get_modrm(*data, &mod, &regop, &rm);
+ int8_t imm8 = static_cast<int8_t>(data[1]);
+ AppendToBuffer("extractps %s,%s,%d",
+ NameOfCPURegister(regop),
+ NameOfXMMRegister(rm),
+ static_cast<int>(imm8));
+ data += 2;
} else if (*data == 0x22) {
data++;
int mod, regop, rm;
@@ -1258,6 +1296,9 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(rm),
static_cast<int>(imm8));
data += 2;
+ } else if (*data == 0x90) {
+ data++;
+ AppendToBuffer("nop"); // 2 byte nop.
} else if (*data == 0xF3) {
data++;
int mod, regop, rm;
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index ef4f0c5f2..6e2391110 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -2883,7 +2883,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(SSE2)) {
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
@@ -3787,7 +3787,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->true_value());
} else {
- __ push(isolate()->factory()->true_value());
+ __ Push(isolate()->factory()->true_value());
}
__ jmp(&done, Label::kNear);
__ bind(&materialize_false);
@@ -3795,7 +3795,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (context()->IsAccumulatorValue()) {
__ mov(eax, isolate()->factory()->false_value());
} else {
- __ push(isolate()->factory()->false_value());
+ __ Push(isolate()->factory()->false_value());
}
__ bind(&done);
}
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index e93353e5d..a83db129a 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1625,6 +1625,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index d5ef4d95a..23db87406 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -341,6 +341,13 @@ int LCodeGen::ToInteger32(LConstantOperand* op) const {
}
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+ Handle<Object> literal = chunk_->LookupLiteral(op);
+ ASSERT(chunk_->LookupLiteralRepresentation(op).IsTagged());
+ return literal;
+}
+
+
double LCodeGen::ToDouble(LConstantOperand* op) const {
Handle<Object> value = chunk_->LookupLiteral(op);
return value->Number();
@@ -518,7 +525,7 @@ void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
} else if (context->IsConstantOperand()) {
Handle<Object> literal =
chunk_->LookupLiteral(LConstantOperand::cast(context));
- LoadHeapObject(esi, Handle<Context>::cast(literal));
+ __ LoadHeapObject(esi, Handle<Context>::cast(literal));
} else {
UNREACHABLE();
}
@@ -1219,7 +1226,7 @@ void LCodeGen::DoConstantT(LConstantT* instr) {
Register reg = ToRegister(instr->result());
Handle<Object> handle = instr->value();
if (handle->IsHeapObject()) {
- LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
+ __ LoadHeapObject(reg, Handle<HeapObject>::cast(handle));
} else {
__ Set(reg, Immediate(handle));
}
@@ -2030,7 +2037,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// the stub.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
- __ mov(InstanceofStub::right(), Immediate(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 13;
int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
__ mov(temp, Immediate(delta));
@@ -2137,20 +2144,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ mov(FieldOperand(object, offset), value);
-
- // Cells are always in the remembered set.
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteField(object,
- offset,
- value,
- address,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- check_needed);
- }
+ // Cells are always rescanned, so no write barrier here.
}
@@ -2171,13 +2165,22 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ mov(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ mov(ContextOperand(context, instr->slot_index()), value);
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ cmp(target, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
+ }
+ __ mov(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@@ -2229,7 +2232,24 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
+ }
+}
+
+
+void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
+ ASSERT(!operand->IsDoubleRegister());
+ if (operand->IsConstantOperand()) {
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
+ } else if (operand->IsRegister()) {
+ __ push(ToRegister(operand));
+ } else {
+ __ push(ToOperand(operand));
}
}
@@ -2639,17 +2659,13 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
void LCodeGen::DoPushArgument(LPushArgument* instr) {
LOperand* argument = instr->InputAt(0);
- if (argument->IsConstantOperand()) {
- __ push(ToImmediate(argument));
- } else {
- __ push(ToOperand(argument));
- }
+ EmitPushTaggedOperand(argument);
}
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2719,7 +2735,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->function());
+ __ LoadHeapObject(edi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2893,12 +2909,12 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ movdbl(xmm_scratch, Operand::StaticVariable(one_half));
__ ucomisd(xmm_scratch, input_reg);
__ j(above, &below_half);
- // input = input + 0.5
- __ addsd(input_reg, xmm_scratch);
+ // xmm_scratch = input + 0.5
+ __ addsd(xmm_scratch, input_reg);
// Compute Math.floor(value + 0.5).
// Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, Operand(input_reg));
+ __ cvttsd2si(output_reg, Operand(xmm_scratch));
// Overflow is signalled with minint.
__ cmp(output_reg, 0x80000000u);
@@ -2934,72 +2950,67 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
+ Register scratch = ToRegister(instr->temp());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, single-precision
+ // -Infinity has the highest 9 bits set and the lowest 23 bits cleared.
+ __ mov(scratch, 0xFF800000);
+ __ movd(xmm_scratch, scratch);
+ __ cvtss2sd(xmm_scratch, xmm_scratch);
+ __ ucomisd(input_reg, xmm_scratch);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
-
- if (exponent_type.IsDouble()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), ToDoubleRegister(right));
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(eax));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
+
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(eax, &no_deopt);
+ __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!ToRegister(right).is(ebx));
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ mov(Operand(esp, 1 * kDoubleSize), ToRegister(right));
- __ CallCFunction(ExternalReference::power_double_int_function(isolate()),
- 4);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsTagged());
- CpuFeatures::Scope scope(SSE2);
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiUntag(right_reg);
- __ cvtsi2sd(result_reg, Operand(right_reg));
- __ jmp(&call);
-
- __ bind(&non_smi);
- // It is safe to use ebx directly since the instruction is marked
- // as a call.
- ASSERT(!right_reg.is(ebx));
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , ebx);
- DeoptimizeIf(not_equal, instr->environment());
- __ movdbl(result_reg, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(4, ebx);
- __ movdbl(Operand(esp, 0 * kDoubleSize), ToDoubleRegister(left));
- __ movdbl(Operand(esp, 1 * kDoubleSize), result_reg);
- __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
- 4);
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
-
- // Return value is in st(0) on ia32.
- // Store it into the (fixed) result register.
- __ sub(Operand(esp), Immediate(kDoubleSize));
- __ fstp_d(Operand(esp, 0));
- __ movdbl(result_reg, Operand(esp, 0));
- __ add(Operand(esp), Immediate(kDoubleSize));
}
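DoMathPowHalf above encodes the ECMA-262 15.8.2.13 corner cases that a plain square root gets wrong; the observable semantics, as a hedged C++ sketch:

#include <cmath>
#include <limits>

// pow(x, 0.5) per ECMA-262 15.8.2.13: pow(-Infinity, 0.5) is +Infinity
// even though sqrt(-Infinity) is NaN, and sqrt must map -0 to +0.
double PowHalf(double x) {
  if (x == -std::numeric_limits<double>::infinity())
    return std::numeric_limits<double>::infinity();
  return std::sqrt(x + 0.0);  // the add converts -0 to +0
}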
@@ -3072,9 +3083,6 @@ void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
case kMathSqrt:
DoMathSqrt(instr);
break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
case kMathCos:
DoMathCos(instr);
break;
@@ -3159,7 +3167,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(eax));
- __ mov(edi, instr->target());
+ __ LoadHeapObject(edi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3524,16 +3532,8 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoStringAdd(LStringAdd* instr) {
- if (instr->left()->IsConstantOperand()) {
- __ push(ToImmediate(instr->left()));
- } else {
- __ push(ToOperand(instr->left()));
- }
- if (instr->right()->IsConstantOperand()) {
- __ push(ToImmediate(instr->right()));
- } else {
- __ push(ToOperand(instr->right()));
- }
+ EmitPushTaggedOperand(instr->left());
+ EmitPushTaggedOperand(instr->right());
StringAddStub stub(NO_STRING_CHECK_IN_STUB);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4032,7 +4032,7 @@ void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
__ cmp(reg, Operand::Cell(cell));
} else {
Operand operand = ToOperand(instr->value());
- __ cmp(operand, instr->hydrogen()->target());
+ __ cmp(operand, target);
}
DeoptimizeIf(not_equal, instr->environment());
}
@@ -4096,17 +4096,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (isolate()->heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- isolate()->factory()->NewJSGlobalPropertyCell(object);
- __ mov(result, Operand::Cell(cell));
- } else {
- __ mov(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -4114,7 +4103,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -4124,7 +4113,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@@ -4136,17 +4125,32 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
-
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(eax, instr->hydrogen()->boilerplate_object());
+ __ mov(ebx, FieldOperand(eax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2". We only need the first byte,
+ // but the following masking takes care of that anyway.
+ __ mov(ebx, FieldOperand(ebx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(ebx, Map::kElementsKindMask);
+ __ cmp(ebx, boilerplate_elements_kind << Map::kElementsKindShift);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
// Set up the parameters to the stub/runtime call.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(eax, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ push(Immediate(constant_elements));
+ // The boilerplate already exists; constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ push(Immediate(Handle<FixedArray>(heap->empty_fixed_array())));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -4162,9 +4166,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
@@ -4179,7 +4183,7 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
ASSERT(!result.is(ecx));
if (FLAG_debug_code) {
- LoadHeapObject(ecx, object);
+ __ LoadHeapObject(ecx, object);
__ cmp(source, ecx);
__ Assert(equal, "Unexpected object literal boilerplate");
}
@@ -4209,10 +4213,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(ecx, Operand(result, *offset));
__ mov(FieldOperand(result, total_offset), ecx);
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
__ mov(FieldOperand(result, total_offset), ecx);
} else {
__ mov(FieldOperand(result, total_offset), Immediate(value));
@@ -4237,7 +4241,7 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset);
ASSERT_EQ(size, offset);
}
@@ -4359,11 +4363,7 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
void LCodeGen::DoTypeof(LTypeof* instr) {
LOperand* input = instr->InputAt(1);
- if (input->IsConstantOperand()) {
- __ push(ToImmediate(input));
- } else {
- __ push(ToOperand(input));
- }
+ EmitPushTaggedOperand(input);
CallRuntime(Runtime::kTypeof, 1, instr);
}
@@ -4487,9 +4487,7 @@ void LCodeGen::EnsureSpaceForLazyDeopt() {
int patch_size = Deoptimizer::patch_size();
if (current_pc < last_lazy_deopt_pc_ + patch_size) {
int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- while (padding_size-- > 0) {
- __ nop();
- }
+ __ Nop(padding_size);
}
last_lazy_deopt_pc_ = masm()->pc_offset();
}
@@ -4513,11 +4511,7 @@ void LCodeGen::DoDeleteProperty(LDeleteProperty* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
__ push(ToOperand(obj));
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
+ EmitPushTaggedOperand(key);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@@ -4614,16 +4608,8 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoIn(LIn* instr) {
LOperand* obj = instr->object();
LOperand* key = instr->key();
- if (key->IsConstantOperand()) {
- __ push(ToImmediate(key));
- } else {
- __ push(ToOperand(key));
- }
- if (obj->IsConstantOperand()) {
- __ push(ToImmediate(obj));
- } else {
- __ push(ToOperand(obj));
- }
+ EmitPushTaggedOperand(key);
+ EmitPushTaggedOperand(obj);
ASSERT(instr->HasPointerMap() && instr->HasDeoptimizationEnvironment());
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 9d1a4f78d..dd335a4e5 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -207,8 +207,6 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
-
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode);
@@ -227,6 +225,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
XMMRegister ToDoubleRegister(int index) const;
int ToInteger32(LConstantOperand* op) const;
+ Handle<Object> ToHandle(LConstantOperand* op) const;
double ToDouble(LConstantOperand* op) const;
Operand BuildFastArrayOperand(LOperand* elements_pointer,
LOperand* key,
@@ -239,7 +238,6 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
@@ -306,6 +304,10 @@ class LCodeGen BASE_EMBEDDED {
void EnsureSpaceForLazyDeopt();
+ // Emits code for pushing either a tagged constant, a (non-double)
+ // register, or a stack slot operand.
+ void EmitPushTaggedOperand(LOperand* operand);
+
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 4e5f27854..f364439f1 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -298,6 +298,12 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
+void LMathPowHalf::PrintDataTo(StringStream* stream) {
+ stream->Add("/pow_half ");
+ InputAt(0)->PrintTo(stream);
+}
+
+
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
InputAt(0)->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1184,6 +1190,11 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
+ if (op == kMathPowHalf) {
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ return DefineSameAsFirst(result);
+ }
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
switch (op) {
@@ -1195,8 +1206,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return AssignEnvironment(DefineAsRegister(result));
case kMathSqrt:
return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
default:
UNREACHABLE();
return NULL;
@@ -1437,9 +1446,9 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
// We need to use fixed result register for the call.
Representation exponent_type = instr->right()->representation();
ASSERT(instr->left()->representation().IsDouble());
- LOperand* left = UseFixedDouble(instr->left(), xmm1);
+ LOperand* left = UseFixedDouble(instr->left(), xmm2);
LOperand* right = exponent_type.IsDouble() ?
- UseFixedDouble(instr->right(), xmm2) :
+ UseFixedDouble(instr->right(), xmm1) :
UseFixed(instr->right(), eax);
LPower* result = new(zone()) LPower(left, right);
return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
@@ -1866,7 +1875,9 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ LInstruction* result =
+ DefineAsRegister(new(zone()) LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1881,7 +1892,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
temp = NULL;
}
- return new(zone()) LStoreContextSlot(context, value, temp);
+ LInstruction* result = new(zone()) LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
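The RequiresHoleCheck plumbing makes context-slot loads and stores deoptimize when they touch an uninitialized binding. Shape of the checks, with an assumed hole sentinel and heap layout:

#include <cstdint>

// kTheHole stands in for the real hole sentinel; Context for the real
// heap layout.
static void* const kTheHole = reinterpret_cast<void*>(~uintptr_t{0});

struct Context { void** slots; };

void* LoadContextSlotChecked(Context* ctx, int index, bool* deopt) {
  void* value = ctx->slots[index];
  *deopt = (value == kTheHole);  // cmp(result, the_hole) + DeoptimizeIf
  return value;
}

void StoreContextSlotChecked(Context* ctx, int index, void* value,
                             bool* deopt) {
  *deopt = (ctx->slots[index] == kTheHole);  // check the old value first
  if (!*deopt) ctx->slots[index] = value;    // then mov(target, value)
}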
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 517064722..7e126ff03 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -123,6 +123,7 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathPowHalf) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -582,6 +583,24 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
};
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+
+ virtual void PrintDataTo(StringStream* stream);
+};
+
+
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index fcae7a2fc..2e4cfa4f0 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -755,7 +755,7 @@ void MacroAssembler::PushTryHandler(CodeLocation try_location,
// Push the state and the code object.
push(Immediate(state));
- push(CodeObject());
+ Push(CodeObject());
// Link the current handler as the next handler.
ExternalReference handler_address(Isolate::kHandlerAddress, isolate());
@@ -2022,7 +2022,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- mov(edi, Immediate(function));
+ LoadHeapObject(edi, function);
mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
@@ -2151,6 +2151,29 @@ int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ mov(result, Operand::Cell(cell));
+ } else {
+ mov(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ push(Operand::Cell(cell));
+ } else {
+ Push(object);
+ }
+}
+
+
void MacroAssembler::Ret() {
ret(0);
}
@@ -2182,11 +2205,6 @@ void MacroAssembler::Move(Register dst, Register src) {
}
-void MacroAssembler::Move(Register dst, Handle<Object> value) {
- mov(dst, value);
-}
-
-
void MacroAssembler::SetCounter(StatsCounter* counter, int value) {
if (FLAG_native_code_counters && counter->Enabled()) {
mov(Operand::StaticVariable(ExternalReference(counter)), Immediate(value));
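
LoadHeapObject and PushHeapObject exist because, as the InNewSpace test suggests, a raw immediate pointing into new space would go stale when the scavenger moves the object, and code pages are not tracked for old-to-new pointers; routing the reference through a JSGlobalPropertyCell gives the GC a slot it does update. A hedged usage sketch mirroring the InvokeFunction change above (the helper name is illustrative, not a real V8 function):

    void EmitLoadKnownFunction(MacroAssembler* masm,
                               Handle<JSFunction> function) {
      masm->LoadHeapObject(edi, function);  // cell indirection for new space
      masm->mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
    }
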
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 03ec28a8a..46f99be20 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -237,6 +237,9 @@ class MacroAssembler: public Assembler {
void StoreToSafepointRegisterSlot(Register dst, Immediate src);
void LoadFromSafepointRegisterSlot(Register dst, Register src);
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
// ---------------------------------------------------------------------------
// JavaScript invokes
@@ -718,10 +721,8 @@ class MacroAssembler: public Assembler {
// Move if the registers are not identical.
void Move(Register target, Register source);
- void Move(Register target, Handle<Object> value);
-
// Push a handle value.
- void Push(Handle<Object> handle) { push(handle); }
+ void Push(Handle<Object> handle) { push(Immediate(handle)); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index aa8f47a88..c27a60fd0 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -429,7 +429,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ mov(edi, Immediate(function));
+ __ LoadHeapObject(edi, function);
__ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
// Pass the additional arguments.
@@ -1025,7 +1025,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1036,7 +1036,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ mov(eax, value);
+ __ LoadHeapObject(eax, value);
__ ret(0);
}
@@ -2522,23 +2522,9 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ mov(cell_operand, eax);
- Label done;
- __ test(eax, Immediate(kSmiTagMask));
- __ j(zero, &done);
-
- __ mov(ecx, eax);
- __ lea(edx, cell_operand);
- // Cells are always in the remembered set.
- __ RecordWrite(ebx, // Object.
- edx, // Address.
- ecx, // Value.
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ // No write barrier here, because cells are always rescanned.
// Return the value (register eax).
- __ bind(&done);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2729,7 +2715,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- eax : receiver
@@ -2891,7 +2877,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- eax : key
// -- edx : receiver
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 498cf3af3..56cea8176 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@@ -36,7 +36,7 @@ namespace v8 {
namespace internal {
-Address IC::address() {
+Address IC::address() const {
// Get the address of the call.
Address result = pc() - Assembler::kCallTargetAddressOffset;
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 2c6d55b93..ad447cca5 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -40,13 +40,13 @@ namespace v8 {
namespace internal {
#ifdef DEBUG
-static char TransitionMarkFromState(IC::State state) {
+char IC::TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
case PREMONOMORPHIC: return 'P';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
- case MEGAMORPHIC: return 'N';
+ case MEGAMORPHIC: return IsGeneric() ? 'G' : 'N';
// We never see the debugger states here, because the state is
// computed from the original code - not the patched code. Let
@@ -80,19 +80,7 @@ void IC::TraceIC(const char* type,
raw_frame = it.frame();
}
}
- if (raw_frame->is_java_script()) {
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Code* js_code = frame->unchecked_code();
- // Find the function on the stack and both the active code for the
- // function and the original code.
- JSFunction* function = JSFunction::cast(frame->function());
- function->PrintName();
- int code_offset =
- static_cast<int>(address() - js_code->instruction_start());
- PrintF("+%d", code_offset);
- } else {
- PrintF("<unknown>");
- }
+ JavaScriptFrame::PrintTop(stdout, false, true);
PrintF(" (%c->%c)",
TransitionMarkFromState(old_state),
TransitionMarkFromState(new_state));
@@ -100,13 +88,23 @@ void IC::TraceIC(const char* type,
PrintF("]\n");
}
}
-#endif // DEBUG
+#define TRACE_GENERIC_IC(type, reason) \
+ do { \
+ if (FLAG_trace_ic) { \
+ PrintF("[%s patching generic stub in ", type); \
+ JavaScriptFrame::PrintTop(stdout, false, true); \
+ PrintF(" (%s)]\n", reason); \
+ } \
+ } while (false)
+
+#else
+#define TRACE_GENERIC_IC(type, reason)
+#endif // DEBUG
#define TRACE_IC(type, name, old_state, new_target) \
ASSERT((TraceIC(type, name, old_state, new_target), true))
-
IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
ASSERT(isolate == Isolate::Current());
// To improve the performance of the (much used) IC code, we unfold
@@ -137,7 +135,7 @@ IC::IC(FrameDepth depth, Isolate* isolate) : isolate_(isolate) {
#ifdef ENABLE_DEBUGGER_SUPPORT
-Address IC::OriginalCodeAddress() {
+Address IC::OriginalCodeAddress() const {
HandleScope scope;
// Compute the JavaScript frame for the frame pointer of this IC
// structure. We need this to be able to find the function
@@ -914,7 +912,7 @@ void LoadIC::UpdateCaches(LookupResult* lookup,
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
- Handle<Object> constant(lookup->GetConstantFunction());
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeLoadConstant(
name, receiver, holder, constant);
break;
@@ -1123,6 +1121,8 @@ MaybeObject* KeyedLoadIC::Load(State state,
stub = ComputeStub(receiver, LOAD, kNonStrictMode, stub);
}
}
+ } else {
+ TRACE_GENERIC_IC("KeyedLoadIC", "force generic");
}
if (!stub.is_null()) set_target(*stub);
}
@@ -1163,7 +1163,7 @@ void KeyedLoadIC::UpdateCaches(LookupResult* lookup,
name, receiver, holder, lookup->GetFieldIndex());
break;
case CONSTANT_FUNCTION: {
- Handle<Object> constant(lookup->GetConstantFunction());
+ Handle<JSFunction> constant(lookup->GetConstantFunction());
code = isolate()->stub_cache()->ComputeKeyedLoadConstant(
name, receiver, holder, constant);
break;
@@ -1473,6 +1473,7 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
// via megamorphic stubs, since they don't have a map in their relocation info
// and so the stubs can't be harvested for the object needed for a map check.
if (target()->type() != NORMAL) {
+ TRACE_GENERIC_IC("KeyedIC", "non-NORMAL target type");
return generic_stub;
}
@@ -1494,12 +1495,14 @@ Handle<Code> KeyedIC::ComputeStub(Handle<JSObject> receiver,
if (!map_added) {
// If the miss wasn't due to an unseen map, a polymorphic stub
// won't help, use the generic stub.
+ TRACE_GENERIC_IC("KeyedIC", "same map added twice");
return generic_stub;
}
// If the maximum number of receiver maps has been exceeded, use the generic
// version of the IC.
if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+ TRACE_GENERIC_IC("KeyedIC", "max polymorph exceeded");
return generic_stub;
}
@@ -1685,6 +1688,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
stub = ComputeStub(receiver, stub_kind, strict_mode, stub);
}
+ } else {
+ TRACE_GENERIC_IC("KeyedStoreIC", "force generic");
}
}
if (!stub.is_null()) set_target(*stub);
@@ -2315,6 +2320,7 @@ const char* CompareIC::GetStateName(State state) {
case SMIS: return "SMIS";
case HEAP_NUMBERS: return "HEAP_NUMBERS";
case OBJECTS: return "OBJECTS";
+ case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
case SYMBOLS: return "SYMBOLS";
case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
@@ -2329,19 +2335,38 @@ CompareIC::State CompareIC::TargetState(State state,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- if (!has_inlined_smi_code && state != UNINITIALIZED && state != SYMBOLS) {
- return GENERIC;
+ switch (state) {
+ case UNINITIALIZED:
+ if (x->IsSmi() && y->IsSmi()) return SMIS;
+ if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
+ if (!Token::IsEqualityOp(op_)) return GENERIC;
+ if (x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
+ if (x->IsString() && y->IsString()) return STRINGS;
+ if (x->IsJSObject() && y->IsJSObject()) {
+ if (Handle<JSObject>::cast(x)->map() ==
+ Handle<JSObject>::cast(y)->map() &&
+ Token::IsEqualityOp(op_)) {
+ return KNOWN_OBJECTS;
+ } else {
+ return OBJECTS;
+ }
+ }
+ return GENERIC;
+ case SMIS:
+ return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
+ ? HEAP_NUMBERS
+ : GENERIC;
+ case SYMBOLS:
+ ASSERT(Token::IsEqualityOp(op_));
+ return x->IsString() && y->IsString() ? STRINGS : GENERIC;
+ case HEAP_NUMBERS:
+ case STRINGS:
+ case OBJECTS:
+ case KNOWN_OBJECTS:
+ case GENERIC:
+ return GENERIC;
}
- if (state == UNINITIALIZED && x->IsSmi() && y->IsSmi()) return SMIS;
- if ((state == UNINITIALIZED || (state == SMIS && has_inlined_smi_code)) &&
- x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
- if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return GENERIC;
- if (state == UNINITIALIZED &&
- x->IsSymbol() && y->IsSymbol()) return SYMBOLS;
- if ((state == UNINITIALIZED || state == SYMBOLS) &&
- x->IsString() && y->IsString()) return STRINGS;
- if (state == UNINITIALIZED &&
- x->IsJSObject() && y->IsJSObject()) return OBJECTS;
+ UNREACHABLE();
return GENERIC;
}
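
The rewritten TargetState replaces a chain of special cases with an explicit lattice. Summarizing the switch above (first match wins; GENERIC is terminal):

    UNINITIALIZED -> SMIS            both operands are smis
    UNINITIALIZED -> HEAP_NUMBERS    both are numbers (not both smis)
    UNINITIALIZED -> SYMBOLS         equality op, both symbols
    UNINITIALIZED -> STRINGS         equality op, both strings
    UNINITIALIZED -> KNOWN_OBJECTS   equality op, both JSObjects, same map
    UNINITIALIZED -> OBJECTS         equality op, both JSObjects, maps differ
    SMIS          -> HEAP_NUMBERS    inlined smi code present, both numbers
    SYMBOLS       -> STRINGS         both strings
    anything else -> GENERIC
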
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index 81aa6b7c2..94e83dc51 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -91,10 +91,13 @@ class IC {
// Construct the IC structure with the given number of extra
// JavaScript frames on the stack.
IC(FrameDepth depth, Isolate* isolate);
+ virtual ~IC() {}
// Get the call-site target; used for determining the state.
- Code* target() { return GetTargetAtAddress(address()); }
- inline Address address();
+ Code* target() const { return GetTargetAtAddress(address()); }
+ inline Address address() const;
+
+ virtual bool IsGeneric() const { return false; }
// Compute the current IC state based on the target stub, receiver and name.
static State StateFrom(Code* target, Object* receiver, Object* name);
@@ -139,13 +142,15 @@ class IC {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the code running is
// containing break points (calls to DebugBreakXXX builtins).
- Address OriginalCodeAddress();
+ Address OriginalCodeAddress() const;
#endif
// Set the call-site target.
void set_target(Code* code) { SetTargetAtAddress(address(), code); }
#ifdef DEBUG
+ char TransitionMarkFromState(IC::State state);
+
void TraceIC(const char* type,
Handle<Object> name,
State old_state,
@@ -452,6 +457,10 @@ class KeyedLoadIC: public KeyedIC {
bool is_js_array,
ElementsKind elements_kind);
+ virtual bool IsGeneric() const {
+ return target() == *generic_stub();
+ }
+
protected:
virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
@@ -477,7 +486,7 @@ class KeyedLoadIC: public KeyedIC {
Handle<Code> megamorphic_stub() {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
- Handle<Code> generic_stub() {
+ Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedLoadIC_Generic();
}
Handle<Code> pre_monomorphic_stub() {
@@ -595,6 +604,11 @@ class KeyedStoreIC: public KeyedIC {
bool is_js_array,
ElementsKind elements_kind);
+ virtual bool IsGeneric() const {
+ return target() == *generic_stub() ||
+ target() == *generic_stub_strict();
+ }
+
protected:
virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
@@ -632,10 +646,10 @@ class KeyedStoreIC: public KeyedIC {
Handle<Code> megamorphic_stub_strict() {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
- Handle<Code> generic_stub() {
+ Handle<Code> generic_stub() const {
return isolate()->builtins()->KeyedStoreIC_Generic();
}
- Handle<Code> generic_stub_strict() {
+ Handle<Code> generic_stub_strict() const {
return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
}
Handle<Code> non_strict_arguments_stub() {
@@ -710,6 +724,7 @@ class CompareIC: public IC {
SYMBOLS,
STRINGS,
OBJECTS,
+ KNOWN_OBJECTS,
GENERIC
};
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index fb3ac5697..a864c3467 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -619,8 +619,7 @@ class CodeFlusher {
}
void AddCandidate(JSFunction* function) {
- ASSERT(function->unchecked_code() ==
- function->unchecked_shared()->unchecked_code());
+ ASSERT(function->code() == function->shared()->code());
SetNextCandidate(function, jsfunction_candidates_head_);
jsfunction_candidates_head_ = function;
@@ -640,15 +639,15 @@ class CodeFlusher {
while (candidate != NULL) {
next_candidate = GetNextCandidate(candidate);
- SharedFunctionInfo* shared = candidate->unchecked_shared();
+ SharedFunctionInfo* shared = candidate->shared();
- Code* code = shared->unchecked_code();
+ Code* code = shared->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
} else {
- candidate->set_code(shared->unchecked_code());
+ candidate->set_code(shared->code());
}
// We are in the middle of a GC cycle so the write barrier in the code
@@ -674,7 +673,7 @@ class CodeFlusher {
next_candidate = GetNextCandidate(candidate);
SetNextCandidate(candidate, NULL);
- Code* code = candidate->unchecked_code();
+ Code* code = candidate->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
if (!code_mark.Get()) {
candidate->set_code(lazy_compile);
@@ -702,7 +701,7 @@ class CodeFlusher {
static SharedFunctionInfo** GetNextCandidateField(
SharedFunctionInfo* candidate) {
- Code* code = candidate->unchecked_code();
+ Code* code = candidate->code();
return reinterpret_cast<SharedFunctionInfo**>(
code->address() + Code::kNextCodeFlushingCandidateOffset);
}
@@ -884,8 +883,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()) {
IC::Clear(rinfo->pc());
- // Please note targets for cleared inline cached do not have to be
- // marked since they are contained in HEAP->non_monomorphic_cache().
target = Code::GetCodeFromTargetAddress(rinfo->target_address());
} else {
if (FLAG_cleanup_code_caches_at_gc &&
@@ -894,9 +891,10 @@ class StaticMarkingVisitor : public StaticVisitorBase {
target->has_function_cache()) {
CallFunctionStub::Clear(heap, rinfo->pc());
}
- MarkBit code_mark = Marking::MarkBitFrom(target);
- heap->mark_compact_collector()->MarkObject(target, code_mark);
}
+ MarkBit code_mark = Marking::MarkBitFrom(target);
+ heap->mark_compact_collector()->MarkObject(target, code_mark);
+
heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
}
@@ -1037,12 +1035,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
inline static bool IsCompiled(JSFunction* function) {
- return function->unchecked_code() !=
+ return function->code() !=
function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
inline static bool IsCompiled(SharedFunctionInfo* function) {
- return function->unchecked_code() !=
+ return function->code() !=
function->GetIsolate()->builtins()->builtin(Builtins::kLazyCompile);
}
@@ -1051,8 +1049,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
- MarkBit code_mark =
- Marking::MarkBitFrom(function->unchecked_code());
+ MarkBit code_mark = Marking::MarkBitFrom(function->code());
if (code_mark.Get()) {
if (!Marking::MarkBitFrom(shared_info).Get()) {
shared_info->set_code_age(0);
@@ -1061,7 +1058,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
// We do not flush code for optimized functions.
- if (function->code() != shared_info->unchecked_code()) {
+ if (function->code() != shared_info->code()) {
return false;
}
@@ -1072,7 +1069,7 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// Code is either on stack, in compilation cache or referenced
// by optimized version of function.
MarkBit code_mark =
- Marking::MarkBitFrom(shared_info->unchecked_code());
+ Marking::MarkBitFrom(shared_info->code());
if (code_mark.Get()) {
return false;
}
@@ -1085,16 +1082,24 @@ class StaticMarkingVisitor : public StaticVisitorBase {
// We never flush code for Api functions.
Object* function_data = shared_info->function_data();
- if (function_data->IsFunctionTemplateInfo()) return false;
+ if (function_data->IsFunctionTemplateInfo()) {
+ return false;
+ }
// Only flush code for functions.
- if (shared_info->code()->kind() != Code::FUNCTION) return false;
+ if (shared_info->code()->kind() != Code::FUNCTION) {
+ return false;
+ }
// Function must be lazy compilable.
- if (!shared_info->allows_lazy_compilation()) return false;
+ if (!shared_info->allows_lazy_compilation()) {
+ return false;
+ }
// If this is a full script wrapped in a function we do not flush the code.
- if (shared_info->is_toplevel()) return false;
+ if (shared_info->is_toplevel()) {
+ return false;
+ }
// Age this shared function info.
if (shared_info->code_age() < kCodeAgeThreshold) {
@@ -1267,30 +1272,12 @@ class StaticMarkingVisitor : public StaticVisitorBase {
}
if (!flush_code_candidate) {
- Code* code = jsfunction->unchecked_shared()->unchecked_code();
+ Code* code = jsfunction->shared()->code();
MarkBit code_mark = Marking::MarkBitFrom(code);
- heap->mark_compact_collector()->MarkObject(code, code_mark);
-
- if (jsfunction->unchecked_code()->kind() == Code::OPTIMIZED_FUNCTION) {
- // For optimized functions we should retain both non-optimized version
- // of it's code and non-optimized version of all inlined functions.
- // This is required to support bailing out from inlined code.
- DeoptimizationInputData* data =
- reinterpret_cast<DeoptimizationInputData*>(
- jsfunction->unchecked_code()->unchecked_deoptimization_data());
-
- FixedArray* literals = data->UncheckedLiteralArray();
-
- for (int i = 0, count = data->InlinedFunctionCount()->value();
- i < count;
- i++) {
- JSFunction* inlined = reinterpret_cast<JSFunction*>(literals->get(i));
- Code* inlined_code = inlined->unchecked_shared()->unchecked_code();
- MarkBit inlined_code_mark =
- Marking::MarkBitFrom(inlined_code);
- heap->mark_compact_collector()->MarkObject(
- inlined_code, inlined_code_mark);
- }
+ collector->MarkObject(code, code_mark);
+
+ if (jsfunction->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+ collector->MarkInlinedFunctionsCode(jsfunction->code());
}
}
@@ -1415,11 +1402,7 @@ class CodeMarkingVisitor : public ThreadVisitor {
: collector_(collector) {}
void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
- for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
- Code* code = it.frame()->unchecked_code();
- MarkBit code_bit = Marking::MarkBitFrom(code);
- collector_->MarkObject(it.frame()->unchecked_code(), code_bit);
- }
+ collector_->PrepareThreadForCodeFlushing(isolate, top);
}
private:
@@ -1441,8 +1424,8 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
if (obj->IsSharedFunctionInfo()) {
SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
MarkBit shared_mark = Marking::MarkBitFrom(shared);
- MarkBit code_mark = Marking::MarkBitFrom(shared->unchecked_code());
- collector_->MarkObject(shared->unchecked_code(), code_mark);
+ MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+ collector_->MarkObject(shared->code(), code_mark);
collector_->MarkObject(shared, shared_mark);
}
}
@@ -1452,6 +1435,44 @@ class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
};
+void MarkCompactCollector::MarkInlinedFunctionsCode(Code* code) {
+ // For optimized functions we should retain both the non-optimized version
+ // of the code and the non-optimized versions of all inlined functions.
+ // This is required to support bailing out from inlined code.
+ DeoptimizationInputData* data =
+ DeoptimizationInputData::cast(code->deoptimization_data());
+
+ FixedArray* literals = data->LiteralArray();
+
+ for (int i = 0, count = data->InlinedFunctionCount()->value();
+ i < count;
+ i++) {
+ JSFunction* inlined = JSFunction::cast(literals->get(i));
+ Code* inlined_code = inlined->shared()->code();
+ MarkBit inlined_code_mark = Marking::MarkBitFrom(inlined_code);
+ MarkObject(inlined_code, inlined_code_mark);
+ }
+}
+
+
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
+ ThreadLocalTop* top) {
+ for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+ // Note: for a frame that has a pending lazy deoptimization,
+ // StackFrame::unchecked_code will return a non-optimized code object for
+ // the outermost function, while StackFrame::LookupCode will return the
+ // actual optimized code object.
+ StackFrame* frame = it.frame();
+ Code* code = frame->unchecked_code();
+ MarkBit code_mark = Marking::MarkBitFrom(code);
+ MarkObject(code, code_mark);
+ if (frame->is_optimized()) {
+ MarkInlinedFunctionsCode(frame->LookupCode());
+ }
+ }
+}
+
+
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
@@ -1479,11 +1500,8 @@ void MarkCompactCollector::PrepareForCodeFlushing() {
// Make sure we are not referencing the code from the stack.
ASSERT(this == heap()->mark_compact_collector());
- for (StackFrameIterator it; !it.done(); it.Advance()) {
- Code* code = it.frame()->unchecked_code();
- MarkBit code_mark = Marking::MarkBitFrom(code);
- MarkObject(code, code_mark);
- }
+ PrepareThreadForCodeFlushing(heap()->isolate(),
+ heap()->isolate()->thread_local_top());
// Iterate the archived stacks in all threads to check if
// the code is referenced.
@@ -2081,6 +2099,24 @@ void MarkCompactCollector::MarkLiveObjects() {
PrepareForCodeFlushing();
+ if (was_marked_incrementally_) {
+ // There is no write barrier on cells so we have to scan them now at the end
+ // of the incremental marking.
+ {
+ HeapObjectIterator cell_iterator(heap()->cell_space());
+ HeapObject* cell;
+ while ((cell = cell_iterator.Next()) != NULL) {
+ ASSERT(cell->IsJSGlobalPropertyCell());
+ if (IsMarked(cell)) {
+ int offset = JSGlobalPropertyCell::kValueOffset;
+ StaticMarkingVisitor::VisitPointer(
+ heap(),
+ reinterpret_cast<Object**>(cell->address() + offset));
+ }
+ }
+ }
+ }
+
RootMarkingVisitor root_visitor(heap());
MarkRoots(&root_visitor);
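
This cell-rescanning hunk pairs with the CompileStoreGlobal change earlier in this patch (ia32 stub-cache), which dropped the RecordWrite after storing into a global property cell. The mutator may skip the barrier only because the collector re-visits every cell slot itself when marking ran incrementally. A sketch of the invariant, not V8 code:

    void StoreGlobal(JSGlobalPropertyCell* cell, Object* new_value) {
      cell->set_value(new_value);  // no RecordWrite: see the cell rescan above
    }
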
@@ -3673,6 +3709,7 @@ void MarkCompactCollector::SweepSpaces() {
#endif
SweeperType how_to_sweep =
FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE;
+ if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE;
if (sweep_precisely_) how_to_sweep = PRECISE;
// Noncompacting collections simply sweep the spaces to clear the mark
// bits and free the nonlive blocks (for old and map spaces). We sweep
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 254f175b6..e0a7d9495 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -383,6 +383,10 @@ class SlotsBuffer {
};
+// Defined in isolate.h.
+class ThreadLocalTop;
+
+
// -------------------------------------------------------------------------
// Mark-Compact collector
class MarkCompactCollector {
@@ -603,6 +607,14 @@ class MarkCompactCollector {
friend class CodeMarkingVisitor;
friend class SharedFunctionInfoMarkingVisitor;
+ // Mark non-optimized code for functions inlined into the given optimized
+ // code. This will prevent it from being flushed.
+ void MarkInlinedFunctionsCode(Code* code);
+
+ // Mark code objects that are active on the stack to prevent them
+ // from being flushed.
+ void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
+
void PrepareForCodeFlushing();
// Marking operations for objects reachable from roots.
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index 5a3f12ee3..5310938a6 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -246,6 +246,7 @@ function FormatMessage(message) {
"unprotected_const", ["Illegal const declaration in unprotected statement context."],
"cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
"redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+ "harmony_const_assign", ["Assignment to constant variable."],
];
var messages = { __proto__ : null };
for (var i = 0; i < messagesDictionary.length; i += 2) {
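
The new entry backs harmony block scoping: assigning to a const-declared variable, e.g. const x = 1; x = 2;, now reports "Assignment to constant variable." rather than the assignment being silently ignored.
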
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index e7dda3fae..47f24a0dd 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -255,21 +255,61 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
+static void GenerateFastCloneShallowArrayCommon(
+ MacroAssembler* masm,
+ int length,
+ FastCloneShallowArrayStub::Mode mode,
+ Label* fail) {
+ // Registers on entry:
+ // a3: boilerplate literal array.
+ ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
- if (length_ > 0) {
- elements_size = mode_ == CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length_)
- : FixedArray::SizeFor(length_);
+ if (length > 0) {
+ elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
}
int size = JSArray::kSize + elements_size;
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ __ AllocateInNewSpace(size,
+ v0,
+ a1,
+ a2,
+ fail,
+ TAG_OBJECT);
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(v0, i));
+ }
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and set up the
+ // elements pointer in the resulting object.
+ __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ Addu(a2, v0, Operand(JSArray::kSize));
+ __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+
+ // Copy the elements array.
+ ASSERT((elements_size % kPointerSize) == 0);
+ __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+ }
+}
+
+void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: constant elements.
+ // [sp + kPointerSize]: literal index.
+ // [sp + (2 * kPointerSize)]: literals array.
+
// Load boilerplate object into a3 and check if we need to create a
// boilerplate.
Label slow_case;
@@ -282,17 +322,42 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
__ Branch(&slow_case, eq, a3, Operand(t1));
+ FastCloneShallowArrayStub::Mode mode = mode_;
+ if (mode == CLONE_ANY_ELEMENTS) {
+ Label double_elements, check_fast_elements;
+ __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
+ __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+ __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
+ __ Branch(&check_fast_elements, ne, v0, Operand(t1));
+ GenerateFastCloneShallowArrayCommon(masm, 0,
+ COPY_ON_WRITE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ DropAndRet(3);
+
+ __ bind(&check_fast_elements);
+ __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
+ __ Branch(&double_elements, ne, v0, Operand(t1));
+ GenerateFastCloneShallowArrayCommon(masm, length_,
+ CLONE_ELEMENTS, &slow_case);
+ // Return and remove the on-stack parameters.
+ __ DropAndRet(3);
+
+ __ bind(&double_elements);
+ mode = CLONE_DOUBLE_ELEMENTS;
+ // Fall through to generate the code to handle double elements.
+ }
+
if (FLAG_debug_code) {
const char* message;
Heap::RootListIndex expected_map_index;
- if (mode_ == CLONE_ELEMENTS) {
+ if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode_ == CLONE_DOUBLE_ELEMENTS) {
+ } else if (mode == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
} else {
- ASSERT(mode_ == COPY_ON_WRITE_ELEMENTS);
+ ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
}
@@ -304,42 +369,59 @@ void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
__ pop(a3);
}
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- // Return new object in v0.
- __ AllocateInNewSpace(size,
- v0,
- a1,
- a2,
- &slow_case,
- TAG_OBJECT);
+ GenerateFastCloneShallowArrayCommon(masm, length_, mode, &slow_case);
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length_ == 0)) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
- }
+ // Return and remove the on-stack parameters.
+ __ Addu(sp, sp, Operand(3 * kPointerSize));
+ __ Ret();
- if (length_ > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ Addu(a2, v0, Operand(JSArray::kSize));
- __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
+ __ bind(&slow_case);
+ __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+}
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
+
+void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) {
+ // Stack layout on entry:
+ //
+ // [sp]: object literal flags.
+ // [sp + kPointerSize]: constant properties.
+ // [sp + (2 * kPointerSize)]: literal index.
+ // [sp + (3 * kPointerSize)]: literals array.
+
+ // Load boilerplate object into a3 and check if we need to create a
+ // boilerplate.
+ Label slow_case;
+ __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+ __ lw(a0, MemOperand(sp, 2 * kPointerSize));
+ __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
+ __ Addu(a3, t0, a3);
+ __ lw(a3, MemOperand(a3));
+ __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
+ __ Branch(&slow_case, eq, a3, Operand(t0));
+
+ // Check that the boilerplate contains only fast properties and we can
+ // statically determine the instance size.
+ int size = JSObject::kHeaderSize + length_ * kPointerSize;
+ __ lw(a0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+ __ Branch(&slow_case, ne, a0, Operand(size >> kPointerSizeLog2));
+
+ // Allocate the JS object and copy header together with all in-object
+ // properties from the boilerplate.
+ __ AllocateInNewSpace(size, a0, a1, a2, &slow_case, TAG_OBJECT);
+ for (int i = 0; i < size; i += kPointerSize) {
+ __ lw(a1, FieldMemOperand(a3, i));
+ __ sw(a1, FieldMemOperand(a0, i));
}
// Return and remove the on-stack parameters.
- __ Addu(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
+ __ Drop(4);
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
__ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
+ __ TailCallRuntime(Runtime::kCreateObjectLiteralShallow, 4, 1);
}
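
The stub clones the boilerplate by a straight field-for-field copy, which is only valid because the instance-size check above guarantees the object is a flat block of header plus in-object properties. A runnable C++ model of the idea (names are illustrative, not V8's):

    #include <cstdlib>
    #include <cstring>

    // Shallow clone: copy header and in-object properties verbatim;
    // pointer-valued properties end up shared with the boilerplate.
    void* CloneShallow(const void* boilerplate, size_t size) {
      void* clone = std::malloc(size);  // stands in for AllocateInNewSpace
      if (clone != NULL) std::memcpy(clone, boilerplate, size);
      return clone;
    }
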
@@ -3510,113 +3592,218 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- Label call_runtime;
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatures::Scope scope(FPU);
-
- Label base_not_smi;
- Label exponent_not_smi;
- Label convert_exponent;
-
- const Register base = a0;
- const Register exponent = a2;
- const Register heapnumbermap = t1;
- const Register heapnumber = s0; // Callee-saved register.
- const Register scratch = t2;
- const Register scratch2 = t3;
-
- // Alocate FP values in the ABI-parameter-passing regs.
- const DoubleRegister double_base = f12;
- const DoubleRegister double_exponent = f14;
- const DoubleRegister double_result = f0;
- const DoubleRegister double_scratch = f2;
-
- __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+ CpuFeatures::Scope fpu_scope(FPU);
+ const Register base = a1;
+ const Register exponent = a2;
+ const Register heapnumbermap = t1;
+ const Register heapnumber = v0;
+ const DoubleRegister double_base = f2;
+ const DoubleRegister double_exponent = f4;
+ const DoubleRegister double_result = f0;
+ const DoubleRegister double_scratch = f6;
+ const FPURegister single_scratch = f8;
+ const Register scratch = t5;
+ const Register scratch2 = t3;
+
+ Label call_runtime, done, exponent_not_smi, int_exponent;
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack to double registers.
__ lw(base, MemOperand(sp, 1 * kPointerSize));
__ lw(exponent, MemOperand(sp, 0 * kPointerSize));
- // Convert base to double value and store it in f0.
- __ JumpIfNotSmi(base, &base_not_smi);
- // Base is a Smi. Untag and convert it.
- __ SmiUntag(base);
- __ mtc1(base, double_scratch);
- __ cvt_d_w(double_base, double_scratch);
- __ Branch(&convert_exponent);
+ __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
- __ bind(&base_not_smi);
+ __ JumpIfSmi(base, &base_is_smi);
__ lw(scratch, FieldMemOperand(base, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- // Base is a heapnumber. Load it into double register.
+
__ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent);
+
+ __ bind(&base_is_smi);
+ __ SmiUntag(base);
+ __ mtc1(base, single_scratch);
+ __ cvt_d_w(double_base, single_scratch);
+ __ bind(&unpack_exponent);
- __ bind(&convert_exponent);
__ JumpIfNotSmi(exponent, &exponent_not_smi);
__ SmiUntag(exponent);
-
- // The base is in a double register and the exponent is
- // an untagged smi. Allocate a heap number and call a
- // C function for integer exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
- __ push(ra);
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(double_base, exponent);
- {
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(
- ExternalReference::power_double_int_function(masm->isolate()), 1, 1);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
- }
- __ sdc1(double_result,
- FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(v0, heapnumber);
- __ DropAndRet(2 * kPointerSize);
+ __ jmp(&int_exponent);
__ bind(&exponent_not_smi);
__ lw(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
__ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
- // Exponent is a heapnumber. Load it into double register.
__ ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ // Base is already in double_base.
+ __ JumpIfNotSmi(exponent, &exponent_not_smi);
+ __ SmiUntag(exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ ldc1(double_exponent,
+ FieldMemOperand(exponent, HeapNumber::kValueOffset));
+ }
+
+ if (exponent_type_ != INTEGER) {
+ Label int_exponent_convert;
+ // Detect integer exponents stored as double.
+ __ EmitFPUTruncate(kRoundToMinusInf,
+ single_scratch,
+ double_exponent,
+ scratch,
+ scratch2,
+ kCheckForInexactConversion);
+ // scratch2 == 0 means there was no conversion error.
+ __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label not_plus_half;
+
+ // Test for 0.5.
+ __ Move(double_scratch, 0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ &not_plus_half,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+
+ // Calculate the square root of the base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ neg_d(double_result, double_scratch);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ sqrt_d(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&not_plus_half);
+ __ Move(double_scratch, -0.5);
+ __ BranchF(USE_DELAY_SLOT,
+ &call_runtime,
+ NULL,
+ ne,
+ double_exponent,
+ double_scratch);
+
+ // Calculate the reciprocal of the square root of the base. Check for the
+ // special case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ __ Move(double_scratch, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+ __ Move(double_result, kDoubleRegZero);
+
+ // Add +0 to convert -0 to +0.
+ __ add_d(double_scratch, double_base, kDoubleRegZero);
+ __ Move(double_result, 1);
+ __ sqrt_d(double_scratch, double_scratch);
+ __ div_d(double_result, double_result, double_scratch);
+ __ jmp(&done);
+ }
- // The base and the exponent are in double registers.
- // Allocate a heap number and call a C function for
- // double exponents. The register containing
- // the heap number is callee-saved.
- __ AllocateHeapNumber(heapnumber,
- scratch,
- scratch2,
- heapnumbermap,
- &call_runtime);
__ push(ra);
- __ PrepareCallCFunction(0, 2, scratch);
- // ABI (o32) for func(double a, double b): a in f12, b in f14.
- ASSERT(double_base.is(f12));
- ASSERT(double_exponent.is(f14));
- __ SetCallCDoubleArguments(double_base, double_exponent);
{
AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
- 0,
- 2);
- __ pop(ra);
- __ GetCFunctionDoubleResult(double_result);
+ 0, 2);
}
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
+ __ jmp(&done);
+
+ __ bind(&int_exponent_convert);
+ __ mfc1(exponent, single_scratch);
+ }
+
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+
+ __ mov(scratch, exponent); // Back up exponent.
+ __ mov_d(double_scratch, double_base); // Back up base.
+ __ Move(double_result, 1.0);
+
+ // Get absolute value of exponent.
+ Label positive_exponent;
+ __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+ __ Subu(scratch, zero_reg, scratch);
+ __ bind(&positive_exponent);
+
+ Label while_true, no_carry, loop_end;
+ __ bind(&while_true);
+
+ __ And(scratch2, scratch, 1);
+
+ __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+ __ mul_d(double_result, double_result, double_scratch);
+ __ bind(&no_carry);
+
+ __ sra(scratch, scratch, 1);
+
+ __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+ __ mul_d(double_scratch, double_scratch, double_scratch);
+
+ __ Branch(&while_true);
+
+ __ bind(&loop_end);
+
+ __ Branch(&done, ge, exponent, Operand(zero_reg));
+ __ Move(double_scratch, 1.0);
+ __ div_d(double_result, double_scratch, double_result);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+ // double_exponent may not contain the exponent value if the input was a
+ // smi. We set it to the exponent value before bailing out.
+ __ mtc1(exponent, single_scratch);
+ __ cvt_d_w(double_exponent, single_scratch);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+
+ // The stub is called from non-optimized code, which expects the result
+ // as a heap number in v0.
+ __ bind(&done);
+ __ AllocateHeapNumber(
+ heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
__ sdc1(double_result,
FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
- __ mov(v0, heapnumber);
- __ DropAndRet(2 * kPointerSize);
- }
+ ASSERT(heapnumber.is(v0));
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ DropAndRet(2);
+ } else {
+ __ push(ra);
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(0, 2, scratch);
+ __ SetCallCDoubleArguments(double_base, double_exponent);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()),
+ 0, 2);
+ }
+ __ pop(ra);
+ __ GetCFunctionDoubleResult(double_result);
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+ __ Ret();
+ }
}
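
The rewritten stub has three strategies: sqrt-based handling of exponents +/-0.5 (with the -0 and -Infinity corner cases from ES5 15.8.2.13), square-and-multiply for integer exponents, and the C library call for everything else. A runnable C++ sketch of the first two paths (illustrative, not the stub itself):

    #include <cmath>
    #include <limits>

    double PowHalf(double base) {
      // Math.pow(-Infinity, 0.5) is +Infinity; sqrt(-Inf) would be NaN.
      if (base == -std::numeric_limits<double>::infinity()) return INFINITY;
      return std::sqrt(base + 0.0);  // adding +0 turns -0 into +0
    }

    double PowInt(double base, int exponent) {
      int e = exponent < 0 ? -exponent : exponent;
      double result = 1.0;
      while (e != 0) {               // square-and-multiply, as in the loop above
        if (e & 1) result *= base;
        base *= base;
        e >>= 1;
      }
      // The stub computes 1/result for negative exponents and bails out to
      // the C function when that underflows to zero (subnormal results).
      return exponent < 0 ? 1.0 / result : result;
    }
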
@@ -4759,8 +4946,12 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
// First check for flat string. None of the following string type tests will
- // succeed if kIsNotStringTag is set.
- __ And(a1, a0, Operand(kIsNotStringMask | kStringRepresentationMask));
+ // succeed if subject is not a string or a short external string.
+ __ And(a1,
+ a0,
+ Operand(kIsNotStringMask |
+ kStringRepresentationMask |
+ kShortExternalStringMask));
STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
__ Branch(&seq_string, eq, a1, Operand(zero_reg));
@@ -4774,16 +4965,17 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// string. Also in this case the first part of the cons string is known to be
// a sequential string or an external string.
// In the case of a sliced string its offset has to be taken into account.
- Label cons_string, check_encoding;
+ Label cons_string, external_string, check_encoding;
STATIC_ASSERT(kConsStringTag < kExternalStringTag);
STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+ STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
__ Branch(&cons_string, lt, a1, Operand(kExternalStringTag));
- __ Branch(&runtime, eq, a1, Operand(kExternalStringTag));
+ __ Branch(&external_string, eq, a1, Operand(kExternalStringTag));
- // Catch non-string subject (should already have been guarded against).
- STATIC_ASSERT(kNotStringTag != 0);
- __ And(at, a1, Operand(kIsNotStringMask));
+ // Catch non-string subject or short external string.
+ STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+ __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
__ Branch(&runtime, ne, at, Operand(zero_reg));
// String is sliced.
@@ -4804,7 +4996,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
STATIC_ASSERT(kSeqStringTag == 0);
__ And(at, a0, Operand(kStringRepresentationMask));
- __ Branch(&runtime, ne, at, Operand(zero_reg));
+ __ Branch(&external_string, ne, at, Operand(zero_reg));
__ bind(&seq_string);
// subject: Subject string
@@ -5030,6 +5222,29 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ Addu(sp, sp, Operand(4 * kPointerSize));
__ Ret();
+ // External string. Short external strings have already been ruled out.
+ // a0: scratch
+ __ bind(&external_string);
+ __ lw(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+ __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, a0, Operand(kIsIndirectStringMask));
+ __ Assert(eq,
+ "external string expected, but not found",
+ at,
+ Operand(zero_reg));
+ }
+ __ lw(subject,
+ FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+ // Move the pointer so that offset-wise, it looks like a sequential string.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Subu(subject,
+ subject,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&seq_string);
+
// Do the runtime call to execute the regexp.
__ bind(&runtime);
__ TailCallRuntime(Runtime::kRegExpExec, 4, 1);
@@ -5288,77 +5503,14 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ lw(t0, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, t0, Operand(index_));
- // We need special handling for non-flat strings.
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(t0, result_, Operand(kStringRepresentationMask));
- __ Branch(&flat_string, eq, t0, Operand(zero_reg));
-
- // Handle non-flat strings.
- __ And(result_, result_, Operand(kStringRepresentationMask));
- STATIC_ASSERT(kConsStringTag < kExternalStringTag);
- STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
- __ Branch(&sliced_string, gt, result_, Operand(kExternalStringTag));
- __ Branch(&call_runtime_, eq, result_, Operand(kExternalStringTag));
-
- // ConsString.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- Label assure_seq_string;
- __ lw(result_, FieldMemOperand(object_, ConsString::kSecondOffset));
- __ LoadRoot(t0, Heap::kEmptyStringRootIndex);
- __ Branch(&call_runtime_, ne, result_, Operand(t0));
-
- // Get the first of the two parts.
- __ lw(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
- __ jmp(&assure_seq_string);
-
- // SlicedString, unpack and add offset.
- __ bind(&sliced_string);
- __ lw(result_, FieldMemOperand(object_, SlicedString::kOffsetOffset));
- __ Addu(index_, index_, result_);
- __ lw(object_, FieldMemOperand(object_, SlicedString::kParentOffset));
-
- // Assure that we are dealing with a sequential string. Go to runtime if not.
- __ bind(&assure_seq_string);
- __ lw(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
- // Check that parent is not an external string. Go to runtime otherwise.
- // Note that if the original string is a cons or slice with an external
- // string as underlying string, we pass that unpacked underlying string with
- // the adjusted index to the runtime function.
- STATIC_ASSERT(kSeqStringTag == 0);
-
- __ And(t0, result_, Operand(kStringRepresentationMask));
- __ Branch(&call_runtime_, ne, t0, Operand(zero_reg));
-
- // Check for 1-byte or 2-byte string.
- __ bind(&flat_string);
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(t0, result_, Operand(kStringEncodingMask));
- __ Branch(&ascii_string, ne, t0, Operand(zero_reg));
+ __ sra(index_, index_, kSmiTagSize);
- // 2-byte string.
- // Load the 2-byte character code into the result register. We can
- // add without shifting since the smi tag size is the log2 of the
- // number of bytes in a two-byte character.
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
- __ Addu(index_, object_, Operand(index_));
- __ lhu(result_, FieldMemOperand(index_, SeqTwoByteString::kHeaderSize));
- __ Branch(&got_char_code);
+ StringCharLoadGenerator::Generate(masm,
+ object_,
+ index_,
+ result_,
+ &call_runtime_);
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
-
- __ srl(t0, index_, kSmiTagSize);
- __ Addu(index_, object_, t0);
-
- __ lbu(result_, FieldMemOperand(index_, SeqAsciiString::kHeaderSize));
-
- __ bind(&got_char_code);
__ sll(result_, result_, kSmiTagSize);
__ bind(&exit_);
}
@@ -5407,6 +5559,7 @@ void StringCharCodeAtGenerator::GenerateSlow(
// is too complex (e.g., when the string needs to be flattened).
__ bind(&call_runtime_);
call_helper.BeforeCall(masm);
+ __ sll(index_, index_, kSmiTagSize);
__ Push(object_, index_);
__ CallRuntime(Runtime::kStringCharCodeAt, 2);
@@ -6821,26 +6974,39 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- __ Push(a1, a0);
- __ push(ra);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ __ And(a2, a1, a0);
+ __ JumpIfSmi(a2, &miss);
+ __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ lw(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Branch(&miss, ne, a2, Operand(known_map_));
+ __ Branch(&miss, ne, a3, Operand(known_map_));
+
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a0, a1);
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
- masm->isolate());
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
FrameScope scope(masm, StackFrame::INTERNAL);
__ Push(a1, a0);
+ __ push(ra);
+ __ Push(a1, a0);
__ li(t0, Operand(Smi::FromInt(op_)));
__ push(t0);
__ CallExternalReference(miss, 3);
+ // Compute the entry point of the rewritten stub.
+ __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+ // Restore registers.
+ __ Pop(a1, a0, ra);
}
- // Compute the entry point of the rewritten stub.
- __ Addu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
- // Restore registers.
- __ pop(ra);
- __ pop(a0);
- __ pop(a1);
__ Jump(a2);
}
@@ -7463,7 +7629,8 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Update the write barrier for the array store.
__ RecordWrite(t1, t2, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
// Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
// FAST_ELEMENTS, and value is Smi.
@@ -7472,14 +7639,16 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ sll(t2, a3, kPointerSizeLog2 - kSmiTagSize);
__ Addu(t2, t1, t2);
__ sw(a0, FieldMemOperand(t2, FixedArray::kHeaderSize));
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(a0, a3, a1, t1, t2, t3, t5, t6,
&slow_elements);
- __ Ret();
+ __ Ret(USE_DELAY_SLOT);
+ __ mov(v0, a0);
}
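
A recurring micro-optimization in this file: Ret(USE_DELAY_SLOT) emits the return jump first and lets the following instruction execute in the MIPS branch delay slot, so setting the return value costs no extra cycle. The pattern, as used above:

    __ Ret(USE_DELAY_SLOT);
    __ mov(v0, a0);  // executes in the delay slot of the return jump
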
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index c94e0fa52..0b6838497 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -310,6 +310,98 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ pop(ra);
}
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime) {
+ // Fetch the instance type of the receiver into the result register.
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // We need special handling for indirect strings.
+ Label check_sequential;
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+ // Dispatch on the indirect string shape: slice or cons.
+ Label cons_string;
+ __ And(at, result, Operand(kSlicedNotConsMask));
+ __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+ // Handle slices.
+ Label indirect_string_loaded;
+ __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+ __ sra(at, result, kSmiTagSize);
+ __ Addu(index, index, at);
+ __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
+ __ jmp(&indirect_string_loaded);
+
+ // Handle cons strings.
+ // Check whether the right hand side is the empty string (i.e. if
+ // this is really a flat string in a cons string). If that is not
+ // the case we would rather go to the runtime system now to flatten
+ // the string.
+ __ bind(&cons_string);
+ __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
+ __ LoadRoot(at, Heap::kEmptyStringRootIndex);
+ __ Branch(call_runtime, ne, result, Operand(at));
+ // Get the first of the two strings and load its instance type.
+ __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+ __ bind(&indirect_string_loaded);
+ __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+ // Distinguish sequential and external strings. Only these two string
+ // representations can reach here (slices and flat cons strings have been
+ // reduced to the underlying sequential or external string).
+ Label external_string, check_encoding;
+ __ bind(&check_sequential);
+ STATIC_ASSERT(kSeqStringTag == 0);
+ __ And(at, result, Operand(kStringRepresentationMask));
+ __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+ // Prepare sequential strings: advance past the header.
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Addu(string,
+ string,
+ SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+ __ jmp(&check_encoding);
+
+ // Handle external strings.
+ __ bind(&external_string);
+ if (FLAG_debug_code) {
+ // Assert that we do not have a cons or slice (indirect strings) here.
+ // Sequential strings have already been ruled out.
+ __ And(at, result, Operand(kIsIndirectStringMask));
+ __ Assert(eq, "external string expected, but not found",
+ at, Operand(zero_reg));
+ }
+ // Rule out short external strings.
+ STATIC_CHECK(kShortExternalStringTag != 0);
+ __ And(at, result, Operand(kShortExternalStringMask));
+ __ Branch(call_runtime, ne, at, Operand(zero_reg));
+ __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+ Label ascii, done;
+ __ bind(&check_encoding);
+ STATIC_ASSERT(kTwoByteStringTag == 0);
+ __ And(at, result, Operand(kStringEncodingMask));
+ __ Branch(&ascii, ne, at, Operand(zero_reg));
+ // Two-byte string.
+ __ sll(at, index, 1);
+ __ Addu(at, string, at);
+ __ lhu(result, MemOperand(at));
+ __ jmp(&done);
+ __ bind(&ascii);
+ // ASCII string.
+ __ Addu(at, string, index);
+ __ lbu(result, MemOperand(at));
+ __ bind(&done);
+}
+
#undef __
} } // namespace v8::internal
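
Note: the new StringCharLoadGenerator centralizes the string-shape dispatch previously duplicated in the lithium backend (see the DoStringCharCodeAt hunk further down). A standalone sketch of the same decision tree, using an invented Str model instead of V8's tagged objects; unflattened cons strings bail out to the runtime here, and the short-external-string bailout of the real code is omitted from the toy:

    #include <string>

    // Illustrative stand-in for V8's string hierarchy: at most one level
    // of indirection (slice or flat cons) is unwrapped, then the character
    // is read from the sequential/external payload. Returns -1 where the
    // generated code would branch to call_runtime instead.
    enum Shape { kSeq, kCons, kSliced, kExternal };

    struct Str {
      Shape shape;
      std::u16string payload;        // character data (seq/external)
      const Str* first = nullptr;    // cons: left side
      const Str* second = nullptr;   // cons: right side
      const Str* parent = nullptr;   // slice: underlying string
      size_t offset = 0;             // slice: start within parent
    };

    int CharAt(const Str* s, size_t index) {
      if (s->shape == kSliced) {                     // handle slices
        index += s->offset;
        s = s->parent;
      } else if (s->shape == kCons) {                // handle cons strings
        if (!s->second->payload.empty()) return -1;  // not flat: runtime
        s = s->first;
      }
      // Only sequential and external strings remain after unwrapping.
      return s->payload[index];
    }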
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 4549509f3..e704c4f56 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -75,6 +75,21 @@ class CodeGenerator: public AstVisitor {
};
+class StringCharLoadGenerator : public AllStatic {
+ public:
+ // Generates the code for handling different string types and loading the
+ // indexed character into |result|. We expect |index| as untagged input and
+ // |result| as untagged output.
+ static void Generate(MacroAssembler* masm,
+ Register string,
+ Register index,
+ Register result,
+ Label* call_runtime);
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 201e6b8e1..1e950e5f5 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -1424,10 +1424,11 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
+ Handle<FixedArray> constant_properties = expr->constant_properties();
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
- __ li(a1, Operand(expr->constant_properties()));
+ __ li(a1, Operand(constant_properties));
int flags = expr->fast_elements()
? ObjectLiteral::kFastElements
: ObjectLiteral::kNoFlags;
@@ -1436,10 +1437,15 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
: ObjectLiteral::kNoFlags;
__ li(a0, Operand(Smi::FromInt(flags)));
__ Push(a3, a2, a1, a0);
+ int properties_count = constant_properties->length() / 2;
if (expr->depth() > 1) {
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
__ CallRuntime(Runtime::kCreateObjectLiteralShallow, 4);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ __ CallStub(&stub);
}
// If result_saved is true the result is on top of the stack. If
@@ -1540,6 +1546,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT_EQ(2, constant_elements->length());
ElementsKind constant_elements_kind =
static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ bool has_fast_elements = constant_elements_kind == FAST_ELEMENTS;
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
@@ -1549,7 +1556,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
__ Push(a3, a2, a1);
- if (constant_elements_values->map() ==
+ if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS, length);
@@ -1564,10 +1571,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
ASSERT(constant_elements_kind == FAST_ELEMENTS ||
constant_elements_kind == FAST_SMI_ONLY_ELEMENTS ||
FLAG_smi_only_arrays);
- FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ FastCloneShallowArrayStub::Mode mode = has_fast_elements
+ ? FastCloneShallowArrayStub::CLONE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
__ CallStub(&stub);
}
@@ -1589,65 +1595,30 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ push(v0);
result_saved = true;
}
+
VisitForAccumulatorValue(subexpr);
- __ lw(t6, MemOperand(sp)); // Copy of array literal.
- __ lw(a1, FieldMemOperand(t6, JSObject::kElementsOffset));
- __ lw(a2, FieldMemOperand(t6, JSObject::kMapOffset));
- int offset = FixedArray::kHeaderSize + (i * kPointerSize);
-
- Label element_done;
- Label double_elements;
- Label smi_element;
- Label slow_elements;
- Label fast_elements;
- __ CheckFastElements(a2, a3, &double_elements);
-
- // FAST_SMI_ONLY_ELEMENTS or FAST_ELEMENTS
- __ JumpIfSmi(result_register(), &smi_element);
- __ CheckFastSmiOnlyElements(a2, a3, &fast_elements);
-
- // Store into the array literal requires a elements transition. Call into
- // the runtime.
- __ bind(&slow_elements);
- __ push(t6); // Copy of array literal.
- __ li(a1, Operand(Smi::FromInt(i)));
- __ li(a2, Operand(Smi::FromInt(NONE))); // PropertyAttributes
- StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE)
- ? kNonStrictMode : kStrictMode;
- __ li(a3, Operand(Smi::FromInt(strict_mode_flag))); // Strict mode.
- __ Push(a1, result_register(), a2, a3);
- __ CallRuntime(Runtime::kSetProperty, 5);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
- __ bind(&double_elements);
- __ li(a3, Operand(Smi::FromInt(i)));
- __ StoreNumberToDoubleElements(result_register(), a3, t6, a1, t0, t1, t5,
- t3, &slow_elements);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_ELEMENTS and value is an object.
- __ bind(&fast_elements);
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Update the write barrier for the array store.
-
- __ RecordWriteField(
- a1, offset, result_register(), a2, kRAHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ Branch(&element_done);
-
- // Array literal has ElementsKind of FAST_SMI_ONLY_ELEMENTS or
- // FAST_ELEMENTS, and value is Smi.
- __ bind(&smi_element);
- __ sw(result_register(), FieldMemOperand(a1, offset));
- // Fall through
-
- __ bind(&element_done);
+ if (constant_elements_kind == FAST_ELEMENTS) {
+ int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+ __ lw(t2, MemOperand(sp)); // Copy of array literal.
+ __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset));
+ __ sw(result_register(), FieldMemOperand(a1, offset));
+ // Update the write barrier for the array store.
+ __ RecordWriteField(a1, offset, result_register(), a2,
+ kRAHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ } else {
+ __ lw(a1, MemOperand(sp)); // Copy of array literal.
+ __ lw(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+ __ li(a3, Operand(Smi::FromInt(i)));
+ __ li(t0, Operand(Smi::FromInt(expr->literal_index())));
+ __ mov(a0, result_register());
+ StoreArrayLiteralElementStub stub;
+ __ CallStub(&stub);
+ }
PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
}
-
if (result_saved) {
context()->PlugTOS();
} else {
@@ -2987,8 +2958,12 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
- __ CallStub(&stub);
+ if (CpuFeatures::IsSupported(FPU)) {
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kMath_pow, 2);
+ }
context()->Plug(v0);
}
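
Note: the VisitObjectLiteral hunk above introduces a three-way split between the deep runtime path, the shallow runtime path, and the new FastCloneShallowObjectStub. A compact sketch of that selection; the limit of 6 is an assumption standing in for FastCloneShallowObjectStub::kMaximumClonedProperties:

    enum LiteralPath { kRuntimeDeep, kRuntimeShallow, kFastCloneStub };

    // constant_properties stores alternating keys and values, hence the
    // division by two to get the property count.
    LiteralPath ChooseObjectLiteralPath(int depth, bool fast_elements,
                                        int constant_properties_length) {
      const int kMaximumClonedProperties = 6;  // assumed stub limit
      int properties_count = constant_properties_length / 2;
      if (depth > 1) return kRuntimeDeep;      // nested literals
      if (!fast_elements || properties_count > kMaximumClonedProperties)
        return kRuntimeShallow;
      return kFastCloneStub;                   // inline shallow clone
    }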
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index b057695f0..c240125bb 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1587,6 +1587,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index c0879bbd9..aba7516fa 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -291,7 +291,22 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
if (op->IsRegister()) {
return ToRegister(op->index());
} else if (op->IsConstantOperand()) {
- __ li(scratch, ToOperand(op));
+ LConstantOperand* const_op = LConstantOperand::cast(op);
+ Handle<Object> literal = chunk_->LookupLiteral(const_op);
+ Representation r = chunk_->LookupLiteralRepresentation(const_op);
+ if (r.IsInteger32()) {
+ ASSERT(literal->IsNumber());
+ __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+ } else if (r.IsDouble()) {
+ Abort("EmitLoadRegister: Unsupported double immediate.");
+ } else {
+ ASSERT(r.IsTagged());
+ if (literal->IsSmi()) {
+ __ li(scratch, Operand(literal));
+ } else {
+ __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
+ }
+ }
return scratch;
} else if (op->IsStackSlot() || op->IsArgument()) {
__ lw(scratch, ToMemOperand(op));
@@ -1162,8 +1177,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ li(ToRegister(instr->result()), Operand(instr->value()));
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ li(ToRegister(instr->result()), Operand(value));
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -2039,7 +2059,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
// offset to the location of the map check.
Register temp = ToRegister(instr->TempAt(0));
ASSERT(temp.is(t0));
- __ li(InstanceofStub::right(), Operand(instr->function()));
+ __ LoadHeapObject(InstanceofStub::right(), instr->function());
static const int kAdditionalDelta = 7;
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
@@ -2141,21 +2161,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ sw(value, FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
-
- // Cells are always in the remembered set.
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- __ RecordWriteField(scratch,
- JSGlobalPropertyCell::kValueOffset,
- value,
- scratch2,
- kRAHasBeenSaved,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- check_needed);
- }
+ // Cells are always rescanned, so no write barrier here.
}
@@ -2175,6 +2181,10 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ lw(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+ }
}
@@ -2182,6 +2192,12 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
MemOperand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ Register scratch = scratch0();
+ __ lw(scratch, target);
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+ }
__ sw(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
@@ -2233,7 +2249,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2687,7 +2703,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2757,7 +2773,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
__ mov(a0, v0);
- __ li(a1, Operand(instr->function()));
+ __ LoadHeapObject(a1, instr->function());
CallKnownFunction(instr->function(), instr->arity(), instr, CALL_AS_METHOD);
}
@@ -2942,11 +2958,11 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ And(scratch, result, Operand(HeapNumber::kSignMask));
__ Move(double_scratch0(), 0.5);
- __ add_d(input, input, double_scratch0());
+ __ add_d(double_scratch0(), input, double_scratch0());
// Check sign of the result: if the sign changed, the input
// value was in [-0.5, -0] and the result should be -0.
- __ mfc1(result, input.high());
+ __ mfc1(result, double_scratch0().high());
__ Xor(result, result, Operand(scratch));
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// ARM uses 'mi' here, which is 'lt'
@@ -2966,7 +2982,7 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
__ EmitFPUTruncate(kRoundToMinusInf,
double_scratch0().low(),
- input,
+ double_scratch0(),
result,
except_flag);
@@ -2996,69 +3012,54 @@ void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->InputAt(0));
DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch = double_scratch0();
+ DoubleRegister temp = ToDoubleRegister(instr->TempAt(0));
+
+ ASSERT(!input.is(result));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done;
+ __ Move(temp, -V8_INFINITY);
+ __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+ // Set up Infinity in the delay slot.
+ // result is overwritten if the branch is not taken.
+ __ neg_d(result, temp);
// Add +0 to convert -0 to +0.
- __ mtc1(zero_reg, double_scratch.low());
- __ mtc1(zero_reg, double_scratch.high());
- __ add_d(result, input, double_scratch);
+ __ add_d(result, input, kDoubleRegZero);
__ sqrt_d(result, result);
+ __ bind(&done);
}
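
Note: the special cases in DoMathPowHalf come straight from ECMA-262: Math.pow(-Infinity, 0.5) is +Infinity although sqrt(-Infinity) is NaN, and Math.pow(-0, 0.5) must be +0 although sqrt(-0) is -0; adding +0 first normalizes the sign, since (-0) + (+0) is +0 under IEEE-754. A scalar sketch of the emitted sequence:

    #include <cmath>
    #include <limits>

    double PowHalf(double x) {
      // Math.pow(-Infinity, 0.5) == Infinity, but sqrt(-Infinity) == NaN.
      if (x == -std::numeric_limits<double>::infinity())
        return std::numeric_limits<double>::infinity();
      // (-0) + (+0) == +0, so this turns -0 into +0 before the sqrt.
      return std::sqrt(x + 0.0);
    }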
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- LOperand* right = instr->InputAt(1);
- Register scratch = scratch0();
- DoubleRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left),
- ToDoubleRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(f4));
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(a2));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(f2));
+ ASSERT(ToDoubleRegister(instr->result()).is(f0));
+
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(a2, &no_deopt);
+ __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+ DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
} else if (exponent_type.IsInteger32()) {
- ASSERT(ToRegister(right).is(a0));
- // Prepare arguments and call C function.
- __ PrepareCallCFunction(1, 1, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), ToRegister(right));
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 1, 1);
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
} else {
- ASSERT(exponent_type.IsTagged());
- ASSERT(instr->hydrogen()->left()->representation().IsDouble());
-
- Register right_reg = ToRegister(right);
-
- // Check for smi on the right hand side.
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
-
- // Untag smi and convert it to a double.
- __ SmiUntag(right_reg);
- FPURegister single_scratch = double_scratch0();
- __ mtc1(right_reg, single_scratch);
- __ cvt_d_w(result_reg, single_scratch);
- __ Branch(&call);
-
- // Heap number map check.
- __ bind(&non_smi);
- __ lw(scratch, FieldMemOperand(right_reg, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
- __ ldc1(result_reg, FieldMemOperand(right_reg, HeapNumber::kValueOffset));
-
- // Prepare arguments and call C function.
- __ bind(&call);
- __ PrepareCallCFunction(0, 2, scratch);
- __ SetCallCDoubleArguments(ToDoubleRegister(left), result_reg);
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 0, 2);
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Store the result in the result register.
- __ GetCFunctionDoubleResult(result_reg);
}
@@ -3194,7 +3195,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
- __ li(a1, Operand(instr->target()));
+ __ LoadHeapObject(a1, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3520,89 +3521,13 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
LStringCharCodeAt* instr_;
};
- Register temp = scratch1();
- Register string = ToRegister(instr->string());
- Register index = ToRegister(instr->index());
- Register result = ToRegister(instr->result());
DeferredStringCharCodeAt* deferred =
new DeferredStringCharCodeAt(this, instr);
-
- // Fetch the instance type of the receiver into result register.
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // We need special handling for indirect strings.
- Label check_sequential;
- __ And(temp, result, kIsIndirectStringMask);
- __ Branch(&check_sequential, eq, temp, Operand(zero_reg));
-
- // Dispatch on the indirect string shape: slice or cons.
- Label cons_string;
- __ And(temp, result, kSlicedNotConsMask);
- __ Branch(&cons_string, eq, temp, Operand(zero_reg));
-
- // Handle slices.
- Label indirect_string_loaded;
- __ lw(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
- __ sra(temp, result, kSmiTagSize);
- __ addu(index, index, temp);
- __ lw(string, FieldMemOperand(string, SlicedString::kParentOffset));
- __ jmp(&indirect_string_loaded);
-
- // Handle conses.
- // Check whether the right hand side is the empty string (i.e. if
- // this is really a flat string in a cons string). If that is not
- // the case we would rather go to the runtime system now to flatten
- // the string.
- __ bind(&cons_string);
- __ lw(result, FieldMemOperand(string, ConsString::kSecondOffset));
- __ LoadRoot(temp, Heap::kEmptyStringRootIndex);
- __ Branch(deferred->entry(), ne, result, Operand(temp));
- // Get the first of the two strings and load its instance type.
- __ lw(string, FieldMemOperand(string, ConsString::kFirstOffset));
-
- __ bind(&indirect_string_loaded);
- __ lw(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
-
- // Check whether the string is sequential. The only non-sequential
- // shapes we support have just been unwrapped above.
- // Note that if the original string is a cons or slice with an external
- // string as underlying string, we pass that unpacked underlying string with
- // the adjusted index to the runtime function.
- __ bind(&check_sequential);
- STATIC_ASSERT(kSeqStringTag == 0);
- __ And(temp, result, Operand(kStringRepresentationMask));
- __ Branch(deferred->entry(), ne, temp, Operand(zero_reg));
-
- // Dispatch on the encoding: ASCII or two-byte.
- Label ascii_string;
- STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
- STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
- __ And(temp, result, Operand(kStringEncodingMask));
- __ Branch(&ascii_string, ne, temp, Operand(zero_reg));
-
- // Two-byte string.
- // Load the two-byte character code into the result register.
- Label done;
- __ Addu(result,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ sll(temp, index, 1);
- __ Addu(result, result, temp);
- __ lhu(result, MemOperand(result, 0));
- __ Branch(&done);
-
- // ASCII string.
- // Load the byte into the result register.
- __ bind(&ascii_string);
- __ Addu(result,
- string,
- Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
- __ Addu(result, result, index);
- __ lbu(result, MemOperand(result, 0));
-
- __ bind(&done);
+ StringCharLoadGenerator::Generate(masm(),
+ ToRegister(instr->string()),
+ ToRegister(instr->index()),
+ ToRegister(instr->result()),
+ deferred->entry());
__ bind(deferred->exit());
}
@@ -4098,10 +4023,20 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- DeoptimizeIf(ne, instr->environment(), reg,
- Operand(instr->hydrogen()->target()));
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ li(at, Operand(Handle<Object>(cell)));
+ __ lw(at, FieldMemOperand(at, JSGlobalPropertyCell::kValueOffset));
+ DeoptimizeIf(ne, instr->environment(), reg, Operand(at));
+ } else {
+ DeoptimizeIf(ne, instr->environment(), reg, Operand(target));
+ }
}
@@ -4170,19 +4105,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::LoadHeapObject(Register result,
- Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ li(result, Operand(cell));
- __ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
- } else {
- __ li(result, Operand(object));
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register temp1 = ToRegister(instr->TempAt(0));
Register temp2 = ToRegister(instr->TempAt(1));
@@ -4191,7 +4113,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -4203,7 +4125,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(temp1, current_prototype);
+ __ LoadHeapObject(temp1, current_prototype);
}
// Check the holder map.
@@ -4216,15 +4138,32 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
-
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // than the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(a1, instr->hydrogen()->boilerplate_object());
+ // Load map into a2.
+ __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
+ DeoptimizeIf(ne,
+ instr->environment(),
+ a2,
+ Operand(boilerplate_elements_kind));
+ }
__ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a1, Operand(constant_elements));
+ // Boilerplate already exists; constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ li(a1, Operand(Handle<FixedArray>(heap->empty_fixed_array())));
__ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
@@ -4241,29 +4180,108 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
-void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
+void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset) {
+ ASSERT(!source.is(a2));
+ ASSERT(!result.is(a2));
+
+ // Increase the offset so that subsequent objects end up right after
+ // this one.
+ int current_offset = *offset;
+ int size = object->map()->instance_size();
+ *offset += size;
+
+ // Copy object header.
+ ASSERT(object->properties()->length() == 0);
+ ASSERT(object->elements()->length() == 0 ||
+ object->elements()->map() == isolate()->heap()->fixed_cow_array_map());
+ int inobject_properties = object->map()->inobject_properties();
+ int header_size = size - inobject_properties * kPointerSize;
+ for (int i = 0; i < header_size; i += kPointerSize) {
+ __ lw(a2, FieldMemOperand(source, i));
+ __ sw(a2, FieldMemOperand(result, current_offset + i));
+ }
+
+ // Copy in-object properties.
+ for (int i = 0; i < inobject_properties; i++) {
+ int total_offset = current_offset + object->GetInObjectPropertyOffset(i);
+ Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ __ Addu(a2, result, Operand(*offset));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ __ LoadHeapObject(source, value_object);
+ EmitDeepCopy(value_object, result, source, offset);
+ } else if (value->IsHeapObject()) {
+ __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ } else {
+ __ li(a2, Operand(value));
+ __ sw(a2, FieldMemOperand(result, total_offset));
+ }
+ }
+}
+
+
+void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
+ int size = instr->hydrogen()->total_size();
+
+ // Allocate all objects that are part of the literal in one big
+ // allocation. This avoids multiple limit checks.
+ Label allocated, runtime_allocate;
+ __ AllocateInNewSpace(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+ __ jmp(&allocated);
+
+ __ bind(&runtime_allocate);
+ __ li(a0, Operand(Smi::FromInt(size)));
+ __ push(a0);
+ CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+
+ __ bind(&allocated);
+ int offset = 0;
+ __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
+ EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset);
+ ASSERT_EQ(size, offset);
+}
+
+
+void LCodeGen::DoObjectLiteralGeneric(LObjectLiteralGeneric* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
+
+ Handle<FixedArray> constant_properties =
+ instr->hydrogen()->constant_properties();
+
__ lw(t0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ lw(t0, FieldMemOperand(t0, JSFunction::kLiteralsOffset));
__ li(a3, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
- __ li(a2, Operand(instr->hydrogen()->constant_properties()));
- __ li(a1, Operand(Smi::FromInt(instr->hydrogen()->fast_elements() ? 1 : 0)));
+ __ li(a2, Operand(constant_properties));
+ int flags = instr->hydrogen()->fast_elements()
+ ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ __ li(a1, Operand(Smi::FromInt(flags)));
__ Push(t0, a3, a2, a1);
// Pick the right runtime function to call.
+ int properties_count = constant_properties->length() / 2;
if (instr->hydrogen()->depth() > 1) {
CallRuntime(Runtime::kCreateObjectLiteral, 4, instr);
- } else {
+ } else if (flags != ObjectLiteral::kFastElements ||
+ properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
CallRuntime(Runtime::kCreateObjectLiteralShallow, 4, instr);
+ } else {
+ FastCloneShallowObjectStub stub(properties_count);
+ CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
}
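
Note: EmitDeepCopy above flattens a whole boilerplate object graph into the single allocation made by DoObjectLiteralFast: *offset always marks the end of everything copied so far, so a nested object's slot can be filled with "result + *offset" before recursing into it. A toy model of that bookkeeping, with an invented Obj type and word-sized slots:

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    // Toy object: one header word followed by one word per field.
    struct Obj {
      std::vector<const Obj*> fields;  // nullptr == smi-like payload
      size_t size() const { return 1 + fields.size(); }
    };

    void DeepCopy(const Obj* obj, uintptr_t* base, size_t* offset) {
      size_t current = *offset;
      *offset += obj->size();          // reserve this object's slice first
      base[current] = 0xC0FFEE;        // stand-in for the copied header
      for (size_t i = 0; i < obj->fields.size(); ++i) {
        uintptr_t* slot = &base[current + 1 + i];
        const Obj* value = obj->fields[i];
        if (value != nullptr) {
          // The child will land exactly at the running offset.
          *slot = reinterpret_cast<uintptr_t>(base + *offset);
          DeepCopy(value, base, offset);   // recursion advances *offset
        } else {
          *slot = 42;                      // immediate value
        }
      }
    }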
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index c67b46be7..32d4fb3f4 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -316,6 +316,13 @@ class LCodeGen BASE_EMBEDDED {
Handle<Map> type,
Handle<String> name);
+ // Emits optimized code to deep-copy the contents of statically known
+ // object graphs (e.g. object literal boilerplate).
+ void EmitDeepCopy(Handle<JSObject> object,
+ Register result,
+ Register source,
+ int* offset);
+
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 81a193a62..f963ec9e3 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -1152,6 +1152,13 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
+ } else if (op == kMathPowHalf) {
+ // Input cannot be the same as the result.
+ // See lithium-codegen-mips.cc::DoMathPowHalf.
+ LOperand* input = UseFixedDouble(instr->value(), f8);
+ LOperand* temp = FixedTemp(f6);
+ LUnaryMathOperation* result = new LUnaryMathOperation(input, temp);
+ return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
@@ -1165,8 +1172,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
- case kMathPowHalf:
- return DefineAsRegister(result);
default:
UNREACHABLE();
return NULL;
@@ -1401,9 +1406,9 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
LOperand* left = UseFixedDouble(instr->left(), f2);
LOperand* right = exponent_type.IsDouble() ?
UseFixedDouble(instr->right(), f4) :
- UseFixed(instr->right(), a0);
+ UseFixed(instr->right(), a2);
LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, f6),
+ return MarkAsCall(DefineFixedDouble(result, f0),
instr,
CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1796,7 +1801,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1810,7 +1816,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
context = UseRegister(instr->context());
value = UseRegister(instr->value());
}
- return new LStoreContextSlot(context, value);
+ LInstruction* result = new LStoreContextSlot(context, value);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -2071,8 +2078,14 @@ LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
}
-LInstruction* LChunkBuilder::DoObjectLiteral(HObjectLiteral* instr) {
- return MarkAsCall(DefineFixed(new LObjectLiteral, v0), instr);
+LInstruction* LChunkBuilder::DoObjectLiteralFast(HObjectLiteralFast* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralFast, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoObjectLiteralGeneric(
+ HObjectLiteralGeneric* instr) {
+ return MarkAsCall(DefineFixed(new LObjectLiteralGeneric, v0), instr);
}
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index 40f3f7a54..efc5e274b 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -134,7 +134,8 @@ class LCodeGen;
V(NumberTagD) \
V(NumberTagI) \
V(NumberUntagD) \
- V(ObjectLiteral) \
+ V(ObjectLiteralFast) \
+ V(ObjectLiteralGeneric) \
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
@@ -1792,6 +1793,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
@@ -1899,10 +1902,17 @@ class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
};
-class LObjectLiteral: public LTemplateInstruction<1, 0, 0> {
+class LObjectLiteralFast: public LTemplateInstruction<1, 0, 0> {
+ public:
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralFast, "object-literal-fast")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralFast)
+};
+
+
+class LObjectLiteralGeneric: public LTemplateInstruction<1, 0, 0> {
public:
- DECLARE_CONCRETE_INSTRUCTION(ObjectLiteral, "object-literal")
- DECLARE_HYDROGEN_ACCESSOR(ObjectLiteral)
+ DECLARE_CONCRETE_INSTRUCTION(ObjectLiteralGeneric, "object-literal-generic")
+ DECLARE_HYDROGEN_ACCESSOR(ObjectLiteralGeneric)
};
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index c1161d73d..cdacbf3c0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -81,6 +81,19 @@ void MacroAssembler::StoreRoot(Register source,
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ li(result, Operand(cell));
+ lw(result, FieldMemOperand(result, JSGlobalPropertyCell::kValueOffset));
+ } else {
+ li(result, Operand(object));
+ }
+}
+
+
// Push and pop all registers that can hold pointers.
void MacroAssembler::PushSafepointRegisters() {
// Safepoints expect a block of kNumSafepointRegisters values on the
@@ -3555,7 +3568,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and setup the context.
- li(a1, Operand(function));
+ LoadHeapObject(a1, function);
lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
ParameterCount expected(function->shared()->formal_parameter_count());
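
Note: LoadHeapObject exists because new-space objects move on every scavenge, so their addresses cannot be embedded in generated code; instead the code embeds the address of a JSGlobalPropertyCell, which does not move, and loads the object through it at run time. A toy model of that indirection (Cell and the surrounding bookkeeping are invented for the sketch):

    #include <cstdio>

    struct Cell { void* value; };   // stand-in for JSGlobalPropertyCell

    // The GC rewrites cell->value when the referenced object moves, so
    // the address baked into the code (the cell's) stays valid.
    void* LoadHeapObject(void* object, bool in_new_space, Cell* cell) {
      if (in_new_space) {
        cell->value = object;       // NewJSGlobalPropertyCell(object)
        return cell->value;         // li result, cell; lw result, [cell]
      }
      return object;                // old space: embed the object directly
    }

    int main() {
      Cell cell{};
      int movable = 7;              // pretend this lives in new space
      int* p = static_cast<int*>(LoadHeapObject(&movable, true, &cell));
      std::printf("%d\n", *p);
      return 0;
    }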
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 6b2a5511e..4e14fbf97 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -262,6 +262,7 @@ class MacroAssembler: public Assembler {
Heap::RootListIndex index,
Condition cond, Register src1, const Operand& src2);
+ void LoadHeapObject(Register dst, Handle<HeapObject> object);
// ---------------------------------------------------------------------------
// GC Support
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 76452f0c9..a94e277a5 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -574,7 +574,7 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and setup the context.
Handle<JSFunction> function = optimization.constant_function();
- __ li(t1, Operand(function));
+ __ LoadHeapObject(t1, function);
__ lw(cp, FieldMemOperand(t1, JSFunction::kContextOffset));
// Pass the additional arguments FastHandleApiCall expects.
@@ -1115,7 +1115,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1127,7 +1127,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ li(v0, Operand(value));
+ __ LoadHeapObject(v0, value);
__ Ret();
}
@@ -2605,15 +2605,7 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ sw(a0, FieldMemOperand(t0, JSGlobalPropertyCell::kValueOffset));
__ mov(v0, a0); // Stored value must be returned in v0.
-
- // This trashes a0 but the value is returned in v0 anyway.
- __ RecordWriteField(t0,
- JSGlobalPropertyCell::kValueOffset,
- a0,
- a2,
- kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET);
+ // Cells are always rescanned, so no write barrier here.
Counters* counters = masm()->isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1, a1, a3);
@@ -2709,7 +2701,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -2847,7 +2839,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 39d6e0413..d6d65718e 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -1115,7 +1115,7 @@ void HeapObject::set_map(Map* value) {
// Unsafe accessor omitting write barrier.
-void HeapObject::set_map_unsafe(Map* value) {
+void HeapObject::set_map_no_write_barrier(Map* value) {
set_map_word(MapWord::FromMap(value));
}
@@ -1183,6 +1183,22 @@ int HeapNumber::get_sign() {
ACCESSORS(JSObject, properties, FixedArray, kPropertiesOffset)
+Object** FixedArray::GetFirstElementAddress() {
+ return reinterpret_cast<Object**>(FIELD_ADDR(this, OffsetOfElementAt(0)));
+}
+
+
+bool FixedArray::ContainsOnlySmisOrHoles() {
+ Object* the_hole = GetHeap()->the_hole_value();
+ Object** current = GetFirstElementAddress();
+ for (int i = 0; i < length(); ++i) {
+ Object* candidate = *current++;
+ if (!candidate->IsSmi() && candidate != the_hole) return false;
+ }
+ return true;
+}
+
+
FixedArrayBase* JSObject::elements() {
Object* array = READ_FIELD(this, kElementsOffset);
return static_cast<FixedArrayBase*>(array);
@@ -1211,38 +1227,66 @@ void JSObject::ValidateSmiOnlyElements() {
}
-MaybeObject* JSObject::EnsureCanContainNonSmiElements() {
+MaybeObject* JSObject::EnsureCanContainHeapObjectElements() {
#if DEBUG
ValidateSmiOnlyElements();
#endif
- if ((map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS)) {
- Object* obj;
- MaybeObject* maybe_obj = GetElementsTransitionMap(FAST_ELEMENTS);
- if (!maybe_obj->ToObject(&obj)) return maybe_obj;
- set_map(Map::cast(obj));
+ if (map()->elements_kind() != FAST_ELEMENTS) {
+ return TransitionElementsKind(FAST_ELEMENTS);
}
return this;
}
MaybeObject* JSObject::EnsureCanContainElements(Object** objects,
- uint32_t count) {
- if (map()->elements_kind() == FAST_SMI_ONLY_ELEMENTS) {
- for (uint32_t i = 0; i < count; ++i) {
- Object* current = *objects++;
- if (!current->IsSmi() && current != GetHeap()->the_hole_value()) {
- return EnsureCanContainNonSmiElements();
+ uint32_t count,
+ EnsureElementsMode mode) {
+ ElementsKind current_kind = map()->elements_kind();
+ ElementsKind target_kind = current_kind;
+ ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (current_kind == FAST_ELEMENTS) return this;
+
+ Heap* heap = GetHeap();
+ Object* the_hole = heap->the_hole_value();
+ Object* heap_number_map = heap->heap_number_map();
+ for (uint32_t i = 0; i < count; ++i) {
+ Object* current = *objects++;
+ if (!current->IsSmi() && current != the_hole) {
+ if (mode == ALLOW_CONVERTED_DOUBLE_ELEMENTS &&
+ HeapObject::cast(current)->map() == heap_number_map) {
+ target_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ target_kind = FAST_ELEMENTS;
+ break;
}
}
}
+
+ if (target_kind != current_kind) {
+ return TransitionElementsKind(target_kind);
+ }
return this;
}
-MaybeObject* JSObject::EnsureCanContainElements(FixedArray* elements) {
- Object** objects = reinterpret_cast<Object**>(
- FIELD_ADDR(elements, elements->OffsetOfElementAt(0)));
- return EnsureCanContainElements(objects, elements->length());
+MaybeObject* JSObject::EnsureCanContainElements(FixedArrayBase* elements,
+ EnsureElementsMode mode) {
+ if (elements->map() != GetHeap()->fixed_double_array_map()) {
+ ASSERT(elements->map() == GetHeap()->fixed_array_map() ||
+ elements->map() == GetHeap()->fixed_cow_array_map());
+ if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
+ mode = DONT_ALLOW_DOUBLE_ELEMENTS;
+ }
+ Object** objects = FixedArray::cast(elements)->GetFirstElementAddress();
+ return EnsureCanContainElements(objects, elements->length(), mode);
+ }
+
+ ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS) {
+ return TransitionElementsKind(FAST_DOUBLE_ELEMENTS);
+ }
+
+ return this;
}
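
Note: the rewritten EnsureCanContainElements scans the incoming values once to pick the widest ElementsKind actually required: smis and holes need no widening, heap numbers only force the double kind when the mode permits conversion, and any other heap object settles the question immediately. A sketch of the scan with a toy value model (names are illustrative, not V8's enums):

    enum Kind { kSmiOnly, kDouble, kObject };
    enum Mode { kDontAllowDouble, kAllowConvertedDouble };
    struct Value { bool is_smi, is_hole, is_heap_number; };

    Kind TargetKind(const Value* values, int count, Kind current, Mode mode) {
      if (current == kObject) return kObject;  // already the widest kind
      Kind target = current;
      for (int i = 0; i < count; ++i) {
        if (values[i].is_smi || values[i].is_hole) continue;  // no widening
        if (mode == kAllowConvertedDouble && values[i].is_heap_number) {
          target = kDouble;   // keep scanning: a later non-number value
        } else {              // can still force the object kind
          target = kObject;
          break;
        }
      }
      return target;
    }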
@@ -1311,8 +1355,6 @@ void JSGlobalPropertyCell::set_value(Object* val, WriteBarrierMode ignored) {
// The write barrier is not used for global property cells.
ASSERT(!val->IsJSGlobalPropertyCell());
WRITE_FIELD(this, kValueOffset, val);
- GetHeap()->incremental_marking()->RecordWrite(
- this, HeapObject::RawField(this, kValueOffset), val);
}
@@ -1703,6 +1745,20 @@ void FixedArray::set(int index,
}
+void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value) {
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
+ ASSERT(index >= 0 && index < array->length());
+ int offset = kHeaderSize + index * kPointerSize;
+ WRITE_FIELD(array, offset, value);
+ Heap* heap = array->GetHeap();
+ if (heap->InNewSpace(value)) {
+ heap->RecordWrite(array->address(), offset);
+ }
+}
+
+
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
@@ -1797,12 +1853,12 @@ void DescriptorArray::set_bit_field3_storage(int value) {
}
-void DescriptorArray::NoWriteBarrierSwap(FixedArray* array,
- int first,
- int second) {
+void DescriptorArray::NoIncrementalWriteBarrierSwap(FixedArray* array,
+ int first,
+ int second) {
Object* tmp = array->get(first);
- NoWriteBarrierSet(array, first, array->get(second));
- NoWriteBarrierSet(array, second, tmp);
+ NoIncrementalWriteBarrierSet(array, first, array->get(second));
+ NoIncrementalWriteBarrierSet(array, second, tmp);
}
@@ -1914,20 +1970,16 @@ void DescriptorArray::Set(int descriptor_number,
// Range check.
ASSERT(descriptor_number < number_of_descriptors());
- // Make sure none of the elements in desc are in new space.
- ASSERT(!HEAP->InNewSpace(desc->GetKey()));
- ASSERT(!HEAP->InNewSpace(desc->GetValue()));
-
- NoWriteBarrierSet(this,
- ToKeyIndex(descriptor_number),
- desc->GetKey());
+ NoIncrementalWriteBarrierSet(this,
+ ToKeyIndex(descriptor_number),
+ desc->GetKey());
FixedArray* content_array = GetContentArray();
- NoWriteBarrierSet(content_array,
- ToValueIndex(descriptor_number),
- desc->GetValue());
- NoWriteBarrierSet(content_array,
- ToDetailsIndex(descriptor_number),
- desc->GetDetails().AsSmi());
+ NoIncrementalWriteBarrierSet(content_array,
+ ToValueIndex(descriptor_number),
+ desc->GetValue());
+ NoIncrementalWriteBarrierSet(content_array,
+ ToDetailsIndex(descriptor_number),
+ desc->GetDetails().AsSmi());
}
@@ -1941,15 +1993,16 @@ void DescriptorArray::CopyFrom(int index,
}
-void DescriptorArray::NoWriteBarrierSwapDescriptors(int first, int second) {
- NoWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
+void DescriptorArray::NoIncrementalWriteBarrierSwapDescriptors(
+ int first, int second) {
+ NoIncrementalWriteBarrierSwap(this, ToKeyIndex(first), ToKeyIndex(second));
FixedArray* content_array = GetContentArray();
- NoWriteBarrierSwap(content_array,
- ToValueIndex(first),
- ToValueIndex(second));
- NoWriteBarrierSwap(content_array,
- ToDetailsIndex(first),
- ToDetailsIndex(second));
+ NoIncrementalWriteBarrierSwap(content_array,
+ ToValueIndex(first),
+ ToValueIndex(second));
+ NoIncrementalWriteBarrierSwap(content_array,
+ ToDetailsIndex(first),
+ ToDetailsIndex(second));
}
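
Note: the NoWriteBarrier* to NoIncrementalWriteBarrier* renames make the contract explicit: these setters still maintain the generational remembered set (old-to-new pointers must stay visible to the scavenger) and only omit the incremental-marking barrier, which the callers can justify -- the descriptor-array sort, for instance, merely swaps values the marker has already seen. A toy model of what the setter does and does not do:

    #include <unordered_set>

    struct Heap {
      std::unordered_set<void*> new_space;      // toy new-space membership
      std::unordered_set<void**> store_buffer;  // remembered set of slots
      bool InNewSpace(void* p) const { return new_space.count(p) != 0; }
      void RecordWrite(void** slot) { store_buffer.insert(slot); }
    };

    void NoIncrementalWriteBarrierSet(Heap* heap, void** slot, void* value) {
      *slot = value;                                         // WRITE_FIELD
      if (heap->InNewSpace(value)) heap->RecordWrite(slot);  // generational
      // Deliberately no incremental-marking barrier: callers guarantee the
      // marker needs no notification (e.g. swapping already-present values).
    }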
@@ -4111,7 +4164,8 @@ ElementsKind JSObject::GetElementsKind() {
(map == GetHeap()->fixed_array_map() ||
map == GetHeap()->fixed_cow_array_map())) ||
(kind == FAST_DOUBLE_ELEMENTS &&
- fixed_array->IsFixedDoubleArray()) ||
+ (fixed_array->IsFixedDoubleArray() ||
+ fixed_array == GetHeap()->empty_fixed_array())) ||
(kind == DICTIONARY_ELEMENTS &&
fixed_array->IsFixedArray() &&
fixed_array->IsDictionary()) ||
@@ -4570,11 +4624,18 @@ void JSArray::set_length(Smi* length) {
}
-MaybeObject* JSArray::SetContent(FixedArray* storage) {
- MaybeObject* maybe_object = EnsureCanContainElements(storage);
- if (maybe_object->IsFailure()) return maybe_object;
- set_length(Smi::FromInt(storage->length()));
+MaybeObject* JSArray::SetContent(FixedArrayBase* storage) {
+ MaybeObject* maybe_result = EnsureCanContainElements(
+ storage, ALLOW_COPIED_DOUBLE_ELEMENTS);
+ if (maybe_result->IsFailure()) return maybe_result;
+ ASSERT((storage->map() == GetHeap()->fixed_double_array_map() &&
+ GetElementsKind() == FAST_DOUBLE_ELEMENTS) ||
+ ((storage->map() != GetHeap()->fixed_double_array_map()) &&
+ ((GetElementsKind() == FAST_ELEMENTS) ||
+ (GetElementsKind() == FAST_SMI_ONLY_ELEMENTS &&
+ FixedArray::cast(storage)->ContainsOnlySmisOrHoles()))));
set_elements(storage);
+ set_length(Smi::FromInt(storage->length()));
return this;
}
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 1565504c2..2a56797c2 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -961,14 +961,14 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields.
if (size >= ExternalString::kSize) {
- this->set_map(
+ this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->external_symbol_with_ascii_data_map()
: heap->external_symbol_map())
: (is_ascii ? heap->external_string_with_ascii_data_map()
: heap->external_string_map()));
} else {
- this->set_map(
+ this->set_map_no_write_barrier(
is_symbol
? (is_ascii ? heap->short_external_symbol_with_ascii_data_map()
: heap->short_external_symbol_map())
@@ -1011,11 +1011,13 @@ bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
// Morph the object to an external string by adjusting the map and
// reinitializing the fields. Use short version if space is limited.
if (size >= ExternalString::kSize) {
- this->set_map(is_symbol ? heap->external_ascii_symbol_map()
- : heap->external_ascii_string_map());
+ this->set_map_no_write_barrier(
+ is_symbol ? heap->external_ascii_symbol_map()
+ : heap->external_ascii_string_map());
} else {
- this->set_map(is_symbol ? heap->short_external_ascii_symbol_map()
- : heap->short_external_ascii_string_map());
+ this->set_map_no_write_barrier(
+ is_symbol ? heap->short_external_ascii_symbol_map()
+ : heap->short_external_ascii_string_map());
}
ExternalAsciiString* self = ExternalAsciiString::cast(this);
self->set_resource(resource);
@@ -1640,8 +1642,6 @@ MaybeObject* JSObject::AddConstantFunctionProperty(
String* name,
JSFunction* function,
PropertyAttributes attributes) {
- ASSERT(!GetHeap()->InNewSpace(function));
-
// Allocate new instance descriptors with (name, function) added
ConstantFunctionDescriptor d(name, function, attributes);
Object* new_descriptors;
@@ -1756,7 +1756,7 @@ MaybeObject* JSObject::AddProperty(String* name,
// Ensure the descriptor array does not get too big.
if (map_of_this->instance_descriptors()->number_of_descriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
- if (value->IsJSFunction() && !heap->InNewSpace(value)) {
+ if (value->IsJSFunction()) {
return AddConstantFunctionProperty(name,
JSFunction::cast(value),
attributes);
@@ -2995,7 +2995,6 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
ASSERT(target_descriptors->GetType(number) == CONSTANT_FUNCTION);
JSFunction* function =
JSFunction::cast(target_descriptors->GetValue(number));
- ASSERT(!HEAP->InNewSpace(function));
if (value == function) {
set_map(target_map);
return value;
@@ -4855,7 +4854,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// of the next map and recording the index in the transition array in
// the map field of the array.
Map* next = Map::cast(contents->get(i));
- next->set_map_unsafe(current);
+ next->set_map_no_write_barrier(current);
*map_or_index_field = Smi::FromInt(i + 2);
current = next;
map_done = false;
@@ -4880,7 +4879,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
Object* perhaps_map = prototype_transitions->get(i);
if (perhaps_map->IsMap()) {
Map* next = Map::cast(perhaps_map);
- next->set_map_unsafe(current);
+ next->set_map_no_write_barrier(current);
*proto_map_or_index_field =
Smi::FromInt(i + kProtoTransitionElementsPerEntry);
current = next;
@@ -4896,7 +4895,7 @@ void Map::TraverseTransitionTree(TraverseCallback callback, void* data) {
// the map field, which is being used to track the traversal and put the
// correct map (the meta_map) in place while we do the callback.
Map* prev = current->map();
- current->set_map_unsafe(meta_map);
+ current->set_map_no_write_barrier(meta_map);
callback(current, data);
current = prev;
}
@@ -5395,7 +5394,9 @@ MaybeObject* FixedArray::CopySize(int new_length) {
AssertNoAllocation no_gc;
int len = length();
if (new_length < len) len = new_length;
- result->set_map(map());
+ // We are taking the map from the old fixed array, so the map is sure to
+ // be an immortal immutable object.
+ result->set_map_no_write_barrier(map());
WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
for (int i = 0; i < len; i++) {
result->set(i, get(i), mode);
@@ -5635,7 +5636,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
}
}
if (child_hash <= parent_hash) break;
- NoWriteBarrierSwapDescriptors(parent_index, child_index);
+ NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
// Now element at child_index could be < its children.
parent_index = child_index; // parent_hash remains correct.
}
@@ -5644,7 +5645,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
// Extract elements and create sorted array.
for (int i = len - 1; i > 0; --i) {
// Put max element at the back of the array.
- NoWriteBarrierSwapDescriptors(0, i);
+ NoIncrementalWriteBarrierSwapDescriptors(0, i);
// Shift down the new top element.
int parent_index = 0;
const uint32_t parent_hash = GetKey(parent_index)->Hash();
@@ -5660,7 +5661,7 @@ void DescriptorArray::SortUnchecked(const WhitenessWitness& witness) {
}
}
if (child_hash <= parent_hash) break;
- NoWriteBarrierSwapDescriptors(parent_index, child_index);
+ NoIncrementalWriteBarrierSwapDescriptors(parent_index, child_index);
parent_index = child_index;
}
}
@@ -7639,6 +7640,22 @@ void SharedFunctionInfo::CompleteInobjectSlackTracking() {
}
+#define DECLARE_TAG(ignore1, name, ignore2) name,
+const char* const VisitorSynchronization::kTags[
+ VisitorSynchronization::kNumberOfSyncTags] = {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
+#define DECLARE_TAG(ignore1, ignore2, name) name,
+const char* const VisitorSynchronization::kTagNames[
+ VisitorSynchronization::kNumberOfSyncTags] = {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_TAG)
+};
+#undef DECLARE_TAG
+
+
void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -8115,9 +8132,20 @@ void Code::Disassemble(const char* name, FILE* out) {
static void CopyFastElementsToFast(FixedArray* source,
FixedArray* destination,
WriteBarrierMode mode) {
- uint32_t count = static_cast<uint32_t>(source->length());
- for (uint32_t i = 0; i < count; ++i) {
- destination->set(i, source->get(i), mode);
+ int count = source->length();
+ int copy_size = Min(count, destination->length());
+ if (mode == SKIP_WRITE_BARRIER ||
+ !Page::FromAddress(destination->address())->IsFlagSet(
+ MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING)) {
+ Address to = destination->address() + FixedArray::kHeaderSize;
+ Address from = source->address() + FixedArray::kHeaderSize;
+ memcpy(reinterpret_cast<void*>(to),
+ reinterpret_cast<void*>(from),
+ kPointerSize * copy_size);
+ } else {
+ for (int i = 0; i < copy_size; ++i) {
+ destination->set(i, source->get(i), mode);
+ }
}
}
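
Note: CopyFastElementsToFast now clamps the copy to the destination's length and, when neither the caller nor the destination page requires a write barrier, degrades to a single memcpy instead of per-element barriered stores. A sketch of that fast path's shape (types and the barrier callback are illustrative):

    #include <cstring>

    void CopyElements(void** dst, int dst_len, void** src, int src_len,
                      bool needs_barrier,
                      void (*barriered_set)(void** slot, void* value)) {
      int copy_size = src_len < dst_len ? src_len : dst_len;  // Min(...)
      if (!needs_barrier) {
        std::memcpy(dst, src, sizeof(void*) * copy_size);     // bulk copy
      } else {
        for (int i = 0; i < copy_size; ++i) barriered_set(&dst[i], src[i]);
      }
    }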
@@ -8125,11 +8153,14 @@ static void CopyFastElementsToFast(FixedArray* source,
static void CopySlowElementsToFast(NumberDictionary* source,
FixedArray* destination,
WriteBarrierMode mode) {
+ int destination_length = destination->length();
for (int i = 0; i < source->Capacity(); ++i) {
Object* key = source->KeyAt(i);
if (key->IsNumber()) {
uint32_t entry = static_cast<uint32_t>(key->Number());
- destination->set(entry, source->ValueAt(i), mode);
+ if (entry < static_cast<uint32_t>(destination_length)) {
+ destination->set(entry, source->ValueAt(i), mode);
+ }
}
}
}
@@ -8340,14 +8371,8 @@ MaybeObject* JSArray::Initialize(int capacity) {
void JSArray::Expand(int required_size) {
- Handle<JSArray> self(this);
- Handle<FixedArray> old_backing(FixedArray::cast(elements()));
- int old_size = old_backing->length();
- int new_size = required_size > old_size ? required_size : old_size;
- Handle<FixedArray> new_backing = FACTORY->NewFixedArray(new_size);
- // Can't use this any more now because we may have had a GC!
- for (int i = 0; i < old_size; i++) new_backing->set(i, old_backing->get(i));
- GetIsolate()->factory()->SetContent(self, new_backing);
+ GetIsolate()->factory()->SetElementsCapacityAndLength(
+ Handle<JSArray>(this), required_size, required_size);
}
@@ -8501,13 +8526,14 @@ MaybeObject* JSReceiver::SetPrototype(Object* value,
MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
uint32_t first_arg,
- uint32_t arg_count) {
+ uint32_t arg_count,
+ EnsureElementsMode mode) {
// Elements in |Arguments| are ordered backwards (because they're on the
// stack), but the method that's called here iterates over them in forward
// direction.
return EnsureCanContainElements(
args->arguments() - first_arg - (arg_count - 1),
- arg_count);
+ arg_count, mode);
}
@@ -9459,31 +9485,45 @@ MUST_USE_RESULT MaybeObject* JSObject::TransitionElementsKind(
FixedArrayBase* elms = FixedArrayBase::cast(elements());
uint32_t capacity = static_cast<uint32_t>(elms->length());
uint32_t length = capacity;
+
if (IsJSArray()) {
- CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ Object* raw_length = JSArray::cast(this)->length();
+ if (raw_length->IsUndefined()) {
+      // If length is undefined, the JSArray is still being initialized and
+      // has no elements; assume a length of zero.
+ length = 0;
+ } else {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&length));
+ }
}
- if (from_kind == FAST_SMI_ONLY_ELEMENTS) {
- if (to_kind == FAST_DOUBLE_ELEMENTS) {
- MaybeObject* maybe_result =
- SetFastDoubleElementsCapacityAndLength(capacity, length);
- if (maybe_result->IsFailure()) return maybe_result;
- return this;
- } else if (to_kind == FAST_ELEMENTS) {
- MaybeObject* maybe_new_map = GetElementsTransitionMap(FAST_ELEMENTS);
- Map* new_map;
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- if (FLAG_trace_elements_transitions) {
- PrintElementsTransition(stdout, from_kind, elms, FAST_ELEMENTS, elms);
- }
- set_map(new_map);
- return this;
+
+ if ((from_kind == FAST_SMI_ONLY_ELEMENTS && to_kind == FAST_ELEMENTS) ||
+ (length == 0)) {
+ MaybeObject* maybe_new_map = GetElementsTransitionMap(to_kind);
+ Map* new_map;
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ if (FLAG_trace_elements_transitions) {
+ PrintElementsTransition(stdout, from_kind, elms, to_kind, elms);
}
- } else if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
+ set_map(new_map);
+ return this;
+ }
+
+ if (from_kind == FAST_SMI_ONLY_ELEMENTS &&
+ to_kind == FAST_DOUBLE_ELEMENTS) {
+ MaybeObject* maybe_result =
+ SetFastDoubleElementsCapacityAndLength(capacity, length);
+ if (maybe_result->IsFailure()) return maybe_result;
+ return this;
+ }
+
+ if (from_kind == FAST_DOUBLE_ELEMENTS && to_kind == FAST_ELEMENTS) {
MaybeObject* maybe_result = SetFastElementsCapacityAndLength(
capacity, length, kDontAllowSmiOnlyElements);
if (maybe_result->IsFailure()) return maybe_result;
return this;
}
+
   // This method should never be called for any case other than the ones
// handled above.
UNREACHABLE();
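
The restructured function now separates three legal transitions: one that only needs a map change and two that must reallocate the backing store. A compact sketch of that decision, using placeholder enums rather than V8's real types:

    #include <cstdlib>

    enum Kind { FAST_SMI_ONLY, FAST, DOUBLE };
    enum Action { CHANGE_MAP_ONLY, REALLOC_AS_DOUBLE, REALLOC_AS_TAGGED };

    static Action Classify(Kind from, Kind to, int length) {
      // Smi-only -> tagged needs no data conversion, and an empty backing
      // store never does, so only the map is swapped in those cases.
      if ((from == FAST_SMI_ONLY && to == FAST) || length == 0) {
        return CHANGE_MAP_ONLY;
      }
      if (from == FAST_SMI_ONLY && to == DOUBLE) return REALLOC_AS_DOUBLE;
      if (from == DOUBLE && to == FAST) return REALLOC_AS_TAGGED;
      std::abort();  // no other transition is legal here
    }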
@@ -10598,7 +10638,7 @@ class SymbolKey : public HashTableKey {
// Transform string to symbol if possible.
Map* map = heap->SymbolMapForString(string_);
if (map != NULL) {
- string_->set_map(map);
+ string_->set_map_no_write_barrier(map);
ASSERT(string_->IsSymbol());
return string_;
}
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 6c88cc01a..2b18e67c9 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -1131,7 +1131,10 @@ class HeapObject: public Object {
// information.
inline Map* map();
inline void set_map(Map* value);
- inline void set_map_unsafe(Map* value);
+  // The no-write-barrier version. This is OK if the object is white and in
+  // new space, or if the value is an immortal immutable object, such as the
+  // maps of primitive (non-JS) objects like strings and heap numbers.
+ inline void set_map_no_write_barrier(Map* value);
// During garbage collection, the map word of a heap object does not
// necessarily contain a map pointer.
@@ -1319,6 +1322,13 @@ class HeapNumber: public HeapObject {
};
+enum EnsureElementsMode {
+ DONT_ALLOW_DOUBLE_ELEMENTS,
+ ALLOW_COPIED_DOUBLE_ELEMENTS,
+ ALLOW_CONVERTED_DOUBLE_ELEMENTS
+};
+
+
// JSReceiver includes types on which properties can be defined, i.e.,
// JSObject and JSProxy.
class JSReceiver: public HeapObject {
@@ -1612,16 +1622,19 @@ class JSObject: public JSReceiver {
inline void ValidateSmiOnlyElements();
- // Makes sure that this object can contain non-smi Object as elements.
- inline MaybeObject* EnsureCanContainNonSmiElements();
+ // Makes sure that this object can contain HeapObject as elements.
+ inline MaybeObject* EnsureCanContainHeapObjectElements();
// Makes sure that this object can contain the specified elements.
inline MaybeObject* EnsureCanContainElements(Object** elements,
- uint32_t count);
- inline MaybeObject* EnsureCanContainElements(FixedArray* elements);
+ uint32_t count,
+ EnsureElementsMode mode);
+ inline MaybeObject* EnsureCanContainElements(FixedArrayBase* elements,
+ EnsureElementsMode mode);
MaybeObject* EnsureCanContainElements(Arguments* arguments,
uint32_t first_arg,
- uint32_t arg_count);
+ uint32_t arg_count,
+ EnsureElementsMode mode);
// Do we want to keep the elements in fast case when increasing the
// capacity?
@@ -2121,6 +2134,9 @@ class FixedArray: public FixedArrayBase {
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2187,6 +2203,13 @@ class FixedArray: public FixedArrayBase {
int index,
Object* value);
+ // Set operation on FixedArray without incremental write barrier. Can
+ // only be used if the object is guaranteed to be white (whiteness witness
+ // is present).
+ static inline void NoIncrementalWriteBarrierSet(FixedArray* array,
+ int index,
+ Object* value);
+
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -2465,12 +2488,12 @@ class DescriptorArray: public FixedArray {
NULL_DESCRIPTOR;
}
// Swap operation on FixedArray without using write barriers.
- static inline void NoWriteBarrierSwap(FixedArray* array,
- int first,
- int second);
+ static inline void NoIncrementalWriteBarrierSwap(
+ FixedArray* array, int first, int second);
// Swap descriptor first and second.
- inline void NoWriteBarrierSwapDescriptors(int first, int second);
+ inline void NoIncrementalWriteBarrierSwapDescriptors(
+ int first, int second);
FixedArray* GetContentArray() {
return FixedArray::cast(get(kContentArrayIndex));
@@ -3738,11 +3761,6 @@ class DeoptimizationInputData: public FixedArray {
DEFINE_ELEMENT_ACCESSORS(OsrAstId, Smi)
DEFINE_ELEMENT_ACCESSORS(OsrPcOffset, Smi)
- // Unchecked accessor to be used during GC.
- FixedArray* UncheckedLiteralArray() {
- return reinterpret_cast<FixedArray*>(get(kLiteralArrayIndex));
- }
-
#undef DEFINE_ELEMENT_ACCESSORS
// Accessors for elements of the ith deoptimization entry.
@@ -7381,7 +7399,7 @@ class JSArray: public JSObject {
MUST_USE_RESULT MaybeObject* Initialize(int capacity);
// Set the content of the array to the content of storage.
- inline MaybeObject* SetContent(FixedArray* storage);
+ inline MaybeObject* SetContent(FixedArrayBase* storage);
// Casting.
static inline JSArray* cast(Object* obj);
@@ -7862,6 +7880,34 @@ class BreakPointInfo: public Struct {
#undef DECL_BOOLEAN_ACCESSORS
#undef DECL_ACCESSORS
+#define VISITOR_SYNCHRONIZATION_TAGS_LIST(V) \
+ V(kSymbolTable, "symbol_table", "(Symbols)") \
+ V(kExternalStringsTable, "external_strings_table", "(External strings)") \
+ V(kStrongRootList, "strong_root_list", "(Strong roots)") \
+ V(kSymbol, "symbol", "(Symbol)") \
+ V(kBootstrapper, "bootstrapper", "(Bootstrapper)") \
+ V(kTop, "top", "(Isolate)") \
+ V(kRelocatable, "relocatable", "(Relocatable)") \
+ V(kDebug, "debug", "(Debugger)") \
+ V(kCompilationCache, "compilationcache", "(Compilation cache)") \
+ V(kHandleScope, "handlescope", "(Handle scope)") \
+ V(kBuiltins, "builtins", "(Builtins)") \
+ V(kGlobalHandles, "globalhandles", "(Global handles)") \
+ V(kThreadManager, "threadmanager", "(Thread manager)") \
+ V(kExtensions, "Extensions", "(Extensions)")
+
+class VisitorSynchronization : public AllStatic {
+ public:
+#define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
+ enum SyncTag {
+ VISITOR_SYNCHRONIZATION_TAGS_LIST(DECLARE_ENUM)
+ kNumberOfSyncTags
+ };
+#undef DECLARE_ENUM
+
+ static const char* const kTags[kNumberOfSyncTags];
+ static const char* const kTagNames[kNumberOfSyncTags];
+};
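
This is the classic X-macro technique: one list macro is expanded three times, keeping the enum and the two string tables in sync by construction. A self-contained sketch of the same pattern with a two-entry list:

    #include <cstdio>

    #define TAGS_LIST(V)            \
      V(kAlpha, "alpha", "(Alpha)") \
      V(kBeta,  "beta",  "(Beta)")

    #define DECLARE_ENUM(enum_item, ignore1, ignore2) enum_item,
    enum SyncTag { TAGS_LIST(DECLARE_ENUM) kNumberOfSyncTags };
    #undef DECLARE_ENUM

    #define DECLARE_TAG(ignore1, name, ignore2) name,
    const char* const kTags[kNumberOfSyncTags] = { TAGS_LIST(DECLARE_TAG) };
    #undef DECLARE_TAG

    #define DECLARE_NAME(ignore1, ignore2, name) name,
    const char* const kTagNames[kNumberOfSyncTags] = {
      TAGS_LIST(DECLARE_NAME)
    };
    #undef DECLARE_NAME

    int main() {
      // One list definition yields a parallel enum and string tables.
      std::printf("%s %s\n", kTags[kAlpha], kTagNames[kBeta]);
      return 0;
    }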
// Abstract base class for visiting, and optionally modifying, the
// pointers contained in Objects. Used in GC and serialization/deserialization.
@@ -7917,13 +7963,10 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a handle that has an embedder-assigned class ID.
virtual void VisitEmbedderReference(Object** p, uint16_t class_id) {}
-#ifdef DEBUG
// Intended for serialization/deserialization checking: insert, or
// check for the presence of, a tag at this position in the stream.
- virtual void Synchronize(const char* tag) {}
-#else
- inline void Synchronize(const char* tag) {}
-#endif
+ // Also used for marking up GC roots in heap snapshots.
+ virtual void Synchronize(VisitorSynchronization::SyncTag tag) {}
};
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index d834acb38..c1681cfbd 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -2158,6 +2158,20 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
// reported (underlining).
Expect(Token::RETURN, CHECK_OK);
+ Token::Value tok = peek();
+ Statement* result;
+ if (scanner().HasAnyLineTerminatorBeforeNext() ||
+ tok == Token::SEMICOLON ||
+ tok == Token::RBRACE ||
+ tok == Token::EOS) {
+ ExpectSemicolon(CHECK_OK);
+ result = new(zone()) ReturnStatement(GetLiteralUndefined());
+ } else {
+ Expression* expr = ParseExpression(true, CHECK_OK);
+ ExpectSemicolon(CHECK_OK);
+ result = new(zone()) ReturnStatement(expr);
+ }
+
// An ECMAScript program is considered syntactically incorrect if it
// contains a return statement that is not within the body of a
// function. See ECMA-262, section 12.9, page 67.
@@ -2170,19 +2184,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
Expression* throw_error = NewThrowSyntaxError(type, Handle<Object>::null());
return new(zone()) ExpressionStatement(throw_error);
}
-
- Token::Value tok = peek();
- if (scanner().HasAnyLineTerminatorBeforeNext() ||
- tok == Token::SEMICOLON ||
- tok == Token::RBRACE ||
- tok == Token::EOS) {
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ReturnStatement(GetLiteralUndefined());
- }
-
- Expression* expr = ParseExpression(true, CHECK_OK);
- ExpectSemicolon(CHECK_OK);
- return new(zone()) ReturnStatement(expr);
+ return result;
}
@@ -2693,6 +2695,7 @@ Expression* Parser::ParseAssignmentExpression(bool accept_IN, bool* ok) {
// Assignment to eval or arguments is disallowed in strict mode.
CheckStrictModeLValue(expression, "strict_lhs_assignment", CHECK_OK);
}
+ MarkAsLValue(expression);
Token::Value op = Next(); // Get assignment operator.
int pos = scanner().location().beg_pos;
@@ -2926,6 +2929,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
// Prefix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
+ MarkAsLValue(expression);
int position = scanner().location().beg_pos;
return new(zone()) CountOperation(isolate(),
@@ -2961,6 +2965,7 @@ Expression* Parser::ParsePostfixExpression(bool* ok) {
// Postfix expression operand in strict mode may not be eval or arguments.
CheckStrictModeLValue(expression, "strict_lhs_prefix", CHECK_OK);
}
+ MarkAsLValue(expression);
Token::Value next = Next();
int position = scanner().location().beg_pos;
@@ -3375,6 +3380,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
isolate()->factory()->NewFixedArray(values->length(), TENURED);
Handle<FixedDoubleArray> double_literals;
ElementsKind elements_kind = FAST_SMI_ONLY_ELEMENTS;
+ bool has_only_undefined_values = true;
// Fill in the literals.
bool is_simple = true;
@@ -3398,6 +3404,7 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
// FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
// the tagged value, no matter what the ElementsKind is in case we
// ultimately end up in FAST_ELEMENTS.
+ has_only_undefined_values = false;
object_literals->set(i, *boilerplate_value);
if (elements_kind == FAST_SMI_ONLY_ELEMENTS) {
// Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
@@ -3436,6 +3443,13 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
}
+  // Very small array literals that don't have a concrete type hint from a
+  // constant value should default to the slow case, to avoid repeated
+  // elements-kind transitions on very small objects.
+ if (has_only_undefined_values && values->length() <= 2) {
+ elements_kind = FAST_ELEMENTS;
+ }
+
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
@@ -4479,6 +4493,15 @@ Handle<String> Parser::ParseIdentifierName(bool* ok) {
}
+void Parser::MarkAsLValue(Expression* expression) {
+ VariableProxy* proxy = expression != NULL
+ ? expression->AsVariableProxy()
+ : NULL;
+
+ if (proxy != NULL) proxy->MarkAsLValue();
+}
+
+
// Checks LHS expression for assignment and prefix/postfix increment/decrement
// in strict mode.
void Parser::CheckStrictModeLValue(Expression* expression,
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 75f8e1093..146d7bb9a 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -661,6 +661,11 @@ class Parser {
bool* is_set,
bool* ok);
+  // Determine if the expression is a variable proxy and mark it as being used
+  // in an assignment or with an increment/decrement operator. This is
+  // currently only used for statically checking assignments to harmony const
+  // bindings.
+ void MarkAsLValue(Expression* expression);
+
// Strict mode validation of LValue expressions
void CheckStrictModeLValue(Expression* expression,
const char* error,
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index cccf0acb3..a59d0bbdc 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -70,6 +70,11 @@ intptr_t OS::MaxVirtualMemory() {
}
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
+
+
#ifndef __CYGWIN__
// Get rid of writable permission on code allocations.
void OS::ProtectCode(void* address, const size_t size) {
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index 8771c4367..822f36064 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -889,6 +889,11 @@ void OS::Free(void* address, const size_t size) {
}
+intptr_t OS::CommitPageSize() {
+ return 4096;
+}
+
+
void OS::ProtectCode(void* address, const size_t size) {
DWORD old_protect;
VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index f84b6b17a..127f788f9 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -172,6 +172,10 @@ class OS {
bool is_executable);
static void Free(void* address, const size_t size);
+ // This is the granularity at which the ProtectCode(...) call can set page
+ // permissions.
+ static intptr_t CommitPageSize();
+
// Mark code segments non-writable.
static void ProtectCode(void* address, const size_t size);
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 49cadb661..b36f4faca 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -627,6 +627,7 @@ PreParser::Statement PreParser::ParseDoWhileStatement(bool* ok) {
Expect(i::Token::LPAREN, CHECK_OK);
ParseExpression(true, CHECK_OK);
Expect(i::Token::RPAREN, ok);
+ if (peek() == i::Token::SEMICOLON) Consume(i::Token::SEMICOLON);
return Statement::Default();
}
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index 88d6e8794..7a70b013b 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -95,6 +95,26 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
}
+uint64_t HeapObjectsMap::GetNthGcSubrootId(int delta) {
+ return kGcRootsFirstSubrootId + delta * kObjectIdStep;
+}
+
+
+HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
+ return reinterpret_cast<HeapObject*>(
+ reinterpret_cast<char*>(kFirstGcSubrootObject) +
+ delta * HeapObjectsMap::kObjectIdStep);
+}
+
+
+int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
+ return static_cast<int>(
+ (reinterpret_cast<char*>(subroot) -
+ reinterpret_cast<char*>(kFirstGcSubrootObject)) /
+ HeapObjectsMap::kObjectIdStep);
+}
+
+
uint64_t HeapEntry::id() {
union {
Id stored_id;
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 5626acaba..a46122be6 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -938,7 +938,7 @@ void HeapGraphEdge::Init(
void HeapGraphEdge::Init(int child_index, Type type, int index, HeapEntry* to) {
- ASSERT(type == kElement || type == kHidden);
+ ASSERT(type == kElement || type == kHidden || type == kWeak);
child_index_ = child_index;
type_ = type;
index_ = index;
@@ -1053,8 +1053,11 @@ void HeapEntry::PaintAllReachable() {
}
-void HeapEntry::Print(int max_depth, int indent) {
- OS::Print("%6d %6d [%llu] ", self_size(), RetainedSize(false), id());
+void HeapEntry::Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent) {
+ OS::Print("%6d %7d @%6llu %*c %s%s: ",
+ self_size(), RetainedSize(false), id(),
+ indent, ' ', prefix, edge_name);
if (type() != kString) {
OS::Print("%s %.40s\n", TypeAsString(), name_);
} else {
@@ -1073,29 +1076,40 @@ void HeapEntry::Print(int max_depth, int indent) {
Vector<HeapGraphEdge> ch = children();
for (int i = 0; i < ch.length(); ++i) {
HeapGraphEdge& edge = ch[i];
+ const char* edge_prefix = "";
+ ScopedVector<char> index(64);
+ const char* edge_name = index.start();
switch (edge.type()) {
case HeapGraphEdge::kContextVariable:
- OS::Print(" %*c #%s: ", indent, ' ', edge.name());
+ edge_prefix = "#";
+ edge_name = edge.name();
break;
case HeapGraphEdge::kElement:
- OS::Print(" %*c %d: ", indent, ' ', edge.index());
+ OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kInternal:
- OS::Print(" %*c $%s: ", indent, ' ', edge.name());
+ edge_prefix = "$";
+ edge_name = edge.name();
break;
case HeapGraphEdge::kProperty:
- OS::Print(" %*c %s: ", indent, ' ', edge.name());
+ edge_name = edge.name();
break;
case HeapGraphEdge::kHidden:
- OS::Print(" %*c $%d: ", indent, ' ', edge.index());
+ edge_prefix = "$";
+ OS::SNPrintF(index, "%d", edge.index());
break;
case HeapGraphEdge::kShortcut:
- OS::Print(" %*c ^%s: ", indent, ' ', edge.name());
+ edge_prefix = "^";
+ edge_name = edge.name();
+ break;
+ case HeapGraphEdge::kWeak:
+ edge_prefix = "w";
+ OS::SNPrintF(index, "%d", edge.index());
break;
default:
- OS::Print("!!! unknown edge type: %d ", edge.type());
+ OS::SNPrintF(index, "!!! unknown edge type: %d ", edge.type());
}
- edge.to()->Print(max_depth, indent + 2);
+ edge.to()->Print(edge_prefix, edge_name, max_depth, indent + 2);
}
}
@@ -1215,6 +1229,9 @@ HeapSnapshot::HeapSnapshot(HeapSnapshotsCollection* collection,
STATIC_ASSERT(
sizeof(HeapEntry) ==
SnapshotSizeConstants<sizeof(void*)>::kExpectedHeapEntrySize); // NOLINT
+ for (int i = 0; i < VisitorSynchronization::kNumberOfSyncTags; ++i) {
+ gc_subroot_entries_[i] = NULL;
+ }
}
HeapSnapshot::~HeapSnapshot() {
@@ -1270,6 +1287,21 @@ HeapEntry* HeapSnapshot::AddGcRootsEntry(int children_count,
}
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count) {
+ ASSERT(gc_subroot_entries_[tag] == NULL);
+ ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+ return (gc_subroot_entries_[tag] = AddEntry(
+ HeapEntry::kObject,
+ VisitorSynchronization::kTagNames[tag],
+ HeapObjectsMap::GetNthGcSubrootId(tag),
+ 0,
+ children_count,
+ retainers_count));
+}
+
+
HeapEntry* HeapSnapshot::AddNativesRootEntry(int children_count,
int retainers_count) {
ASSERT(natives_root_entry_ == NULL);
@@ -1355,17 +1387,22 @@ List<HeapEntry*>* HeapSnapshot::GetSortedEntriesList() {
void HeapSnapshot::Print(int max_depth) {
- root()->Print(max_depth, 0);
+ root()->Print("", "", max_depth, 0);
}
// We split IDs on evens for embedder objects (see
// HeapObjectsMap::GenerateId) and odds for native objects.
const uint64_t HeapObjectsMap::kInternalRootObjectId = 1;
-const uint64_t HeapObjectsMap::kGcRootsObjectId = 3;
-const uint64_t HeapObjectsMap::kNativesRootObjectId = 5;
-// Increase kFirstAvailableObjectId if new 'special' objects appear.
-const uint64_t HeapObjectsMap::kFirstAvailableObjectId = 7;
+const uint64_t HeapObjectsMap::kGcRootsObjectId =
+ HeapObjectsMap::kInternalRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kNativesRootObjectId =
+ HeapObjectsMap::kGcRootsObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kGcRootsFirstSubrootId =
+ HeapObjectsMap::kNativesRootObjectId + HeapObjectsMap::kObjectIdStep;
+const uint64_t HeapObjectsMap::kFirstAvailableObjectId =
+ HeapObjectsMap::kGcRootsFirstSubrootId +
+ VisitorSynchronization::kNumberOfSyncTags * HeapObjectsMap::kObjectIdStep;
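
With kObjectIdStep = 2, the special ids above become 1, 3, 5, then 7 for the first GC subroot, and the first free id lands just past the last subroot. A sketch of the arithmetic and of the fake-pointer encoding used by GetNthGcSubrootObject; the value 14 for the number of sync tags is taken from the tag list added to objects.h:

    #include <cassert>
    #include <cstdint>

    static const int kObjectIdStep = 2;       // odd ids; evens are embedder ids
    static const int kNumberOfSyncTags = 14;  // from the objects.h tag list

    static const uint64_t kInternalRootId = 1;
    static const uint64_t kGcRootsId = kInternalRootId + kObjectIdStep;     // 3
    static const uint64_t kNativesRootId = kGcRootsId + kObjectIdStep;      // 5
    static const uint64_t kFirstSubrootId = kNativesRootId + kObjectIdStep; // 7
    static const uint64_t kFirstAvailableId =
        kFirstSubrootId + kNumberOfSyncTags * kObjectIdStep;                // 35

    // Subroots get synthetic "addresses" numerically equal to their ids, so
    // they can flow through code that expects HeapObject pointers.
    static void* GetNthGcSubrootObject(int delta) {
      return reinterpret_cast<void*>(
          static_cast<uintptr_t>(kFirstSubrootId + delta * kObjectIdStep));
    }

    static int GetGcSubrootOrder(void* subroot) {
      return static_cast<int>(
          (reinterpret_cast<uintptr_t>(subroot) - kFirstSubrootId) /
          kObjectIdStep);
    }

    int main() {
      assert(kFirstAvailableId == 35);
      assert(GetGcSubrootOrder(GetNthGcSubrootObject(5)) == 5);
      return 0;
    }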
HeapObjectsMap::HeapObjectsMap()
: initial_fill_mode_(true),
@@ -1391,7 +1428,7 @@ uint64_t HeapObjectsMap::FindObject(Address addr) {
if (existing != 0) return existing;
}
uint64_t id = next_id_;
- next_id_ += 2;
+ next_id_ += kObjectIdStep;
AddEntry(addr, id);
return id;
}
@@ -1684,6 +1721,12 @@ HeapObject *const V8HeapExplorer::kInternalRootObject =
HeapObject *const V8HeapExplorer::kGcRootsObject =
reinterpret_cast<HeapObject*>(
static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
+HeapObject *const V8HeapExplorer::kFirstGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
+HeapObject *const V8HeapExplorer::kLastGcSubrootObject =
+ reinterpret_cast<HeapObject*>(
+ static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
V8HeapExplorer::V8HeapExplorer(
@@ -1716,6 +1759,11 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
return snapshot_->AddRootEntry(children_count);
} else if (object == kGcRootsObject) {
return snapshot_->AddGcRootsEntry(children_count, retainers_count);
+ } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
+ return snapshot_->AddGcSubrootEntry(
+ GetGcSubrootOrder(object),
+ children_count,
+ retainers_count);
} else if (object->IsJSGlobalObject()) {
const char* tag = objects_tags_.GetTag(object);
const char* name = collection_->names()->GetName(
@@ -1779,6 +1827,18 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
: "",
children_count,
retainers_count);
+ } else if (object->IsGlobalContext()) {
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / GlobalContext",
+ children_count,
+ retainers_count);
+ } else if (object->IsContext()) {
+ return AddEntry(object,
+ HeapEntry::kHidden,
+ "system / Context",
+ children_count,
+ retainers_count);
} else if (object->IsFixedArray() ||
object->IsFixedDoubleArray() ||
object->IsByteArray() ||
@@ -1818,9 +1878,38 @@ HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object,
}
+class GcSubrootsEnumerator : public ObjectVisitor {
+ public:
+ GcSubrootsEnumerator(
+ SnapshotFillerInterface* filler, V8HeapExplorer* explorer)
+ : filler_(filler),
+ explorer_(explorer),
+ previous_object_count_(0),
+ object_count_(0) {
+ }
+ void VisitPointers(Object** start, Object** end) {
+ object_count_ += end - start;
+ }
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ // Skip empty subroots.
+ if (previous_object_count_ != object_count_) {
+ previous_object_count_ = object_count_;
+ filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
+ }
+ }
+ private:
+ SnapshotFillerInterface* filler_;
+ V8HeapExplorer* explorer_;
+ intptr_t previous_object_count_;
+ intptr_t object_count_;
+};
+
+
void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
filler->AddEntry(kInternalRootObject, this);
filler->AddEntry(kGcRootsObject, this);
+ GcSubrootsEnumerator enumerator(filler, this);
+ heap_->IterateRoots(&enumerator, VISIT_ALL);
}
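
The enumerator never records individual objects; it only counts pointers, and Synchronize() fires at the boundary between two subroots. Comparing the running count against the count at the previous boundary is enough to detect, and skip, empty subroots. The same delta trick in miniature:

    #include <cstdio>

    int main() {
      // Pointer counts observed between sync tags (0 means an empty subroot).
      int counts_per_tag[] = {3, 0, 2};
      long previous = 0, total = 0;
      for (int tag = 0; tag < 3; ++tag) {
        total += counts_per_tag[tag];        // VisitPointers accumulates
        if (previous != total) {             // Synchronize(tag)
          previous = total;
          std::printf("subroot entry for tag %d\n", tag);  // tags 0 and 2 only
        }
      }
      return 0;
    }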
@@ -1939,6 +2028,11 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"literals_or_bindings",
js_fun->literals_or_bindings(),
JSFunction::kLiteralsOffset);
+ for (int i = JSFunction::kNonWeakFieldsEndOffset;
+ i < JSFunction::kSize;
+ i += kPointerSize) {
+ SetWeakReference(js_fun, entry, i, *HeapObject::RawField(js_fun, i), i);
+ }
}
TagObject(js_obj->properties(), "(object properties)");
SetInternalReference(obj, entry,
@@ -1965,8 +2059,14 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->map_cache(), "(context map cache)");
TagObject(context->data(), "(context data)");
+ for (int i = Context::FIRST_WEAK_SLOT;
+ i < Context::GLOBAL_CONTEXT_SLOTS;
+ ++i) {
+ SetWeakReference(obj, entry,
+ i, context->get(i),
+ FixedArray::OffsetOfElementAt(i));
+ }
} else if (obj->IsMap()) {
Map* map = Map::cast(obj);
SetInternalReference(obj, entry,
@@ -2009,6 +2109,9 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
SetInternalReference(obj, entry,
"script", shared->script(),
SharedFunctionInfo::kScriptOffset);
+ SetWeakReference(obj, entry,
+ 1, shared->initial_map(),
+ SharedFunctionInfo::kInitialMapOffset);
} else if (obj->IsScript()) {
Script* script = Script::cast(obj);
SetInternalReference(obj, entry,
@@ -2235,15 +2338,66 @@ HeapEntry* V8HeapExplorer::GetEntry(Object* obj) {
class RootsReferencesExtractor : public ObjectVisitor {
+ private:
+ struct IndexTag {
+ IndexTag(int index, VisitorSynchronization::SyncTag tag)
+ : index(index), tag(tag) { }
+ int index;
+ VisitorSynchronization::SyncTag tag;
+ };
+
public:
- explicit RootsReferencesExtractor(V8HeapExplorer* explorer)
- : explorer_(explorer) {
+ RootsReferencesExtractor()
+ : collecting_all_references_(false),
+ previous_reference_count_(0) {
}
+
void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) explorer_->SetGcRootsReference(*p);
+ if (collecting_all_references_) {
+ for (Object** p = start; p < end; p++) all_references_.Add(*p);
+ } else {
+ for (Object** p = start; p < end; p++) strong_references_.Add(*p);
+ }
+ }
+
+ void SetCollectingAllReferences() { collecting_all_references_ = true; }
+
+ void FillReferences(V8HeapExplorer* explorer) {
+ ASSERT(strong_references_.length() <= all_references_.length());
+ for (int i = 0; i < reference_tags_.length(); ++i) {
+ explorer->SetGcRootsReference(reference_tags_[i].tag);
+ }
+ int strong_index = 0, all_index = 0, tags_index = 0;
+ while (all_index < all_references_.length()) {
+ if (strong_index < strong_references_.length() &&
+ strong_references_[strong_index] == all_references_[all_index]) {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ false,
+ all_references_[all_index++]);
+ ++strong_index;
+ } else {
+ explorer->SetGcSubrootReference(reference_tags_[tags_index].tag,
+ true,
+ all_references_[all_index++]);
+ }
+ if (reference_tags_[tags_index].index == all_index) ++tags_index;
+ }
+ }
+
+ void Synchronize(VisitorSynchronization::SyncTag tag) {
+ if (collecting_all_references_ &&
+ previous_reference_count_ != all_references_.length()) {
+ previous_reference_count_ = all_references_.length();
+ reference_tags_.Add(IndexTag(previous_reference_count_, tag));
+ }
}
+
private:
- V8HeapExplorer* explorer_;
+ bool collecting_all_references_;
+ List<Object*> strong_references_;
+ List<Object*> all_references_;
+ int previous_reference_count_;
+ List<IndexTag> reference_tags_;
};
@@ -2268,8 +2422,11 @@ bool V8HeapExplorer::IterateAndExtractReferences(
return false;
}
SetRootGcRootsReference();
- RootsReferencesExtractor extractor(this);
+ RootsReferencesExtractor extractor;
+ heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
+ extractor.SetCollectingAllReferences();
heap_->IterateRoots(&extractor, VISIT_ALL);
+ extractor.FillReferences(this);
filler_ = NULL;
return progress_->ProgressReport(false);
}
@@ -2359,6 +2516,24 @@ void V8HeapExplorer::SetHiddenReference(HeapObject* parent_obj,
}
+void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset) {
+ HeapEntry* child_entry = GetEntry(child_obj);
+ if (child_entry != NULL) {
+ filler_->SetIndexedReference(HeapGraphEdge::kWeak,
+ parent_obj,
+ parent_entry,
+ index,
+ child_obj,
+ child_entry);
+ IndexedReferencesExtractor::MarkVisitedField(parent_obj, field_offset);
+ }
+}
+
+
void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent_entry,
String* reference_name,
@@ -2421,12 +2596,21 @@ void V8HeapExplorer::SetRootShortcutReference(Object* child_obj) {
}
-void V8HeapExplorer::SetGcRootsReference(Object* child_obj) {
+void V8HeapExplorer::SetGcRootsReference(VisitorSynchronization::SyncTag tag) {
+ filler_->SetIndexedAutoIndexReference(
+ HeapGraphEdge::kElement,
+ kGcRootsObject, snapshot_->gc_roots(),
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag));
+}
+
+
+void V8HeapExplorer::SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child_obj) {
HeapEntry* child_entry = GetEntry(child_obj);
if (child_entry != NULL) {
filler_->SetIndexedAutoIndexReference(
- HeapGraphEdge::kElement,
- kGcRootsObject, snapshot_->gc_roots(),
+ is_weak ? HeapGraphEdge::kWeak : HeapGraphEdge::kElement,
+ GetNthGcSubrootObject(tag), snapshot_->gc_subroot(tag),
child_obj, child_entry);
}
}
@@ -3235,7 +3419,8 @@ void HeapSnapshotJSONSerializer::SerializeEdge(HeapGraphEdge* edge) {
writer_->AddNumber(edge->type());
writer_->AddCharacter(',');
if (edge->type() == HeapGraphEdge::kElement
- || edge->type() == HeapGraphEdge::kHidden) {
+ || edge->type() == HeapGraphEdge::kHidden
+ || edge->type() == HeapGraphEdge::kWeak) {
writer_->AddNumber(edge->index());
} else {
writer_->AddNumber(GetStringId(edge->name()));
@@ -3315,7 +3500,8 @@ void HeapSnapshotJSONSerializer::SerializeNodes() {
"," JSON_S("property")
"," JSON_S("internal")
"," JSON_S("hidden")
- "," JSON_S("shortcut"))
+ "," JSON_S("shortcut")
+ "," JSON_S("weak"))
"," JSON_S("string_or_number")
"," JSON_S("node"))))));
#undef JSON_S
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index 44be3db78..b47ce8255 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -455,7 +455,8 @@ class HeapGraphEdge BASE_EMBEDDED {
kProperty = v8::HeapGraphEdge::kProperty,
kInternal = v8::HeapGraphEdge::kInternal,
kHidden = v8::HeapGraphEdge::kHidden,
- kShortcut = v8::HeapGraphEdge::kShortcut
+ kShortcut = v8::HeapGraphEdge::kShortcut,
+ kWeak = v8::HeapGraphEdge::kWeak
};
HeapGraphEdge() { }
@@ -465,7 +466,7 @@ class HeapGraphEdge BASE_EMBEDDED {
Type type() { return static_cast<Type>(type_); }
int index() {
- ASSERT(type_ == kElement || type_ == kHidden);
+ ASSERT(type_ == kElement || type_ == kHidden || type_ == kWeak);
return index_;
}
const char* name() {
@@ -588,7 +589,8 @@ class HeapEntry BASE_EMBEDDED {
int EntrySize() { return EntriesSize(1, children_count_, retainers_count_); }
int RetainedSize(bool exact);
- void Print(int max_depth, int indent);
+ void Print(
+ const char* prefix, const char* edge_name, int max_depth, int indent);
Handle<HeapObject> GetHeapObject();
@@ -661,6 +663,7 @@ class HeapSnapshot {
HeapEntry* root() { return root_entry_; }
HeapEntry* gc_roots() { return gc_roots_entry_; }
HeapEntry* natives_root() { return natives_root_entry_; }
+ HeapEntry* gc_subroot(int index) { return gc_subroot_entries_[index]; }
List<HeapEntry*>* entries() { return &entries_; }
int raw_entries_size() { return raw_entries_size_; }
@@ -674,6 +677,9 @@ class HeapSnapshot {
int retainers_count);
HeapEntry* AddRootEntry(int children_count);
HeapEntry* AddGcRootsEntry(int children_count, int retainers_count);
+ HeapEntry* AddGcSubrootEntry(int tag,
+ int children_count,
+ int retainers_count);
HeapEntry* AddNativesRootEntry(int children_count, int retainers_count);
void ClearPaint();
HeapEntry* GetEntryById(uint64_t id);
@@ -695,6 +701,7 @@ class HeapSnapshot {
HeapEntry* root_entry_;
HeapEntry* gc_roots_entry_;
HeapEntry* natives_root_entry_;
+ HeapEntry* gc_subroot_entries_[VisitorSynchronization::kNumberOfSyncTags];
char* raw_entries_;
List<HeapEntry*> entries_;
bool entries_sorted_;
@@ -716,10 +723,13 @@ class HeapObjectsMap {
void MoveObject(Address from, Address to);
static uint64_t GenerateId(v8::RetainedObjectInfo* info);
+ static inline uint64_t GetNthGcSubrootId(int delta);
+ static const int kObjectIdStep = 2;
static const uint64_t kInternalRootObjectId;
static const uint64_t kGcRootsObjectId;
static const uint64_t kNativesRootObjectId;
+ static const uint64_t kGcRootsFirstSubrootId;
static const uint64_t kFirstAvailableObjectId;
private:
@@ -969,6 +979,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapEntry* parent,
int index,
Object* child);
+ void SetWeakReference(HeapObject* parent_obj,
+ HeapEntry* parent_entry,
+ int index,
+ Object* child_obj,
+ int field_offset);
void SetPropertyReference(HeapObject* parent_obj,
HeapEntry* parent,
String* reference_name,
@@ -981,11 +996,16 @@ class V8HeapExplorer : public HeapEntriesAllocator {
Object* child);
void SetRootShortcutReference(Object* child);
void SetRootGcRootsReference();
- void SetGcRootsReference(Object* child);
+ void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
+ void SetGcSubrootReference(
+ VisitorSynchronization::SyncTag tag, bool is_weak, Object* child);
void TagObject(Object* obj, const char* tag);
HeapEntry* GetEntry(Object* obj);
+ static inline HeapObject* GetNthGcSubrootObject(int delta);
+ static inline int GetGcSubrootOrder(HeapObject* subroot);
+
Heap* heap_;
HeapSnapshot* snapshot_;
HeapSnapshotsCollection* collection_;
@@ -994,8 +1014,11 @@ class V8HeapExplorer : public HeapEntriesAllocator {
HeapObjectsSet objects_tags_;
static HeapObject* const kGcRootsObject;
+ static HeapObject* const kFirstGcSubrootObject;
+ static HeapObject* const kLastGcSubrootObject;
friend class IndexedReferencesExtractor;
+ friend class GcSubrootsEnumerator;
friend class RootsReferencesExtractor;
DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index a2e569b31..f3adc51ac 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -625,6 +625,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateArrayLiteralShallow) {
// Check if boilerplate exists. If not, create it first.
Handle<Object> boilerplate(literals->get(literals_index), isolate);
if (*boilerplate == isolate->heap()->undefined_value()) {
+ ASSERT(*elements != isolate->heap()->empty_fixed_array());
boilerplate = CreateArrayLiteralBoilerplate(isolate, literals, elements);
if (boilerplate.is_null()) return Failure::Exception();
// Update the functions literal and return the boilerplate.
@@ -4651,6 +4652,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
if (value->IsNumber()) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS);
TransitionElementsKind(object, FAST_DOUBLE_ELEMENTS);
+ TransitionElementsKind(boilerplate_object, FAST_DOUBLE_ELEMENTS);
ASSERT(object->GetElementsKind() == FAST_DOUBLE_ELEMENTS);
FixedDoubleArray* double_array =
FixedDoubleArray::cast(object->elements());
@@ -4660,6 +4662,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StoreArrayLiteralElement) {
ASSERT(elements_kind == FAST_SMI_ONLY_ELEMENTS ||
elements_kind == FAST_DOUBLE_ELEMENTS);
TransitionElementsKind(object, FAST_ELEMENTS);
+ TransitionElementsKind(boilerplate_object, FAST_ELEMENTS);
FixedArray* object_array =
FixedArray::cast(object->elements());
object_array->set(store_index, *value);
@@ -6293,7 +6296,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringSplit) {
int part_count = indices.length();
Handle<JSArray> result = isolate->factory()->NewJSArray(part_count);
- MaybeObject* maybe_result = result->EnsureCanContainNonSmiElements();
+ MaybeObject* maybe_result = result->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
result->set_length(Smi::FromInt(part_count));
@@ -6669,7 +6672,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
// This assumption is used by the slice encoding in one or two smis.
ASSERT(Smi::kMaxValue >= String::kMaxLength);
- MaybeObject* maybe_result = array->EnsureCanContainNonSmiElements();
+ MaybeObject* maybe_result = array->EnsureCanContainHeapObjectElements();
if (maybe_result->IsFailure()) return maybe_result;
int special_length = special->length();
@@ -7395,7 +7398,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_log) {
return isolate->transcendental_cache()->Get(TranscendentalCache::LOG, x);
}
-
+// Slow version of Math.pow. Special cases are still handled by fast paths.
+// Used if SSE2/VFP3 is not available.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
@@ -7411,22 +7415,36 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow) {
}
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
- return isolate->heap()->AllocateHeapNumber(power_double_double(x, y));
+ int y_int = static_cast<int>(y);
+ double result;
+ if (y == y_int) {
+ result = power_double_int(x, y_int); // Returns 1 if exponent is 0.
+ } else if (y == 0.5) {
+ result = (isinf(x)) ? V8_INFINITY : sqrt(x + 0.0); // Convert -0 to +0.
+ } else if (y == -0.5) {
+ result = (isinf(x)) ? 0 : 1.0 / sqrt(x + 0.0); // Convert -0 to +0.
+ } else {
+ result = power_double_double(x, y);
+ }
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
-// Fast version of Math.pow if we know that y is not an integer and
-// y is not -0.5 or 0.5. Used as slowcase from codegen.
+// Fast version of Math.pow if we know that y is not an integer and y is not
+// -0.5 or 0.5. Used as slow case from fullcodegen.
RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_pow_cfunction) {
NoHandleAllocation ha;
ASSERT(args.length() == 2);
+ isolate->counters()->math_pow()->Increment();
+
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
CONVERT_DOUBLE_ARG_CHECKED(y, 1);
if (y == 0) {
return Smi::FromInt(1);
- } else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
- return isolate->heap()->nan_value();
} else {
- return isolate->heap()->AllocateHeapNumber(pow(x, y));
+ double result = power_double_double(x, y);
+ if (isnan(result)) return isolate->heap()->nan_value();
+ return isolate->heap()->AllocateHeapNumber(result);
}
}
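
The +/-0.5 fast paths look odd without the ES5 corner cases in mind: pow(-Infinity, 0.5) must be +Infinity although C's sqrt(-Infinity) is NaN, and sqrt(-0.0) is -0.0, which would turn pow(-0, -0.5) into -Infinity instead of +Infinity. Adding 0.0 folds -0 into +0 first. A standalone check of both guards; this is a sketch, not the V8 code:

    #include <cassert>
    #include <cmath>

    static double PowHalf(double x) {
      return std::isinf(x) ? INFINITY : std::sqrt(x + 0.0);  // -0 + 0 == +0
    }

    static double PowMinusHalf(double x) {
      return std::isinf(x) ? 0 : 1.0 / std::sqrt(x + 0.0);
    }

    int main() {
      assert(PowHalf(-INFINITY) == INFINITY);  // naive sqrt would give NaN
      assert(PowMinusHalf(-0.0) == INFINITY);  // naive 1/sqrt(-0.0) is -Inf
      assert(!std::signbit(PowHalf(-0.0)));    // result is +0, not -0
      return 0;
    }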
@@ -7991,7 +8009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NewStrictArgumentsFast) {
AssertNoAllocation no_gc;
FixedArray* array = reinterpret_cast<FixedArray*>(obj);
- array->set_map(isolate->heap()->fixed_array_map());
+ array->set_map_no_write_barrier(isolate->heap()->fixed_array_map());
array->set_length(length);
WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
@@ -8111,7 +8129,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_FunctionBindArguments) {
for (int j = 0; j < argc; j++, i++) {
new_bindings->set(i, *arguments[j + 1]);
}
- new_bindings->set_map(isolate->heap()->fixed_cow_array_map());
+ new_bindings->set_map_no_write_barrier(
+ isolate->heap()->fixed_cow_array_map());
bound_function->set_function_bindings(*new_bindings);
// Update length.
@@ -9299,7 +9318,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DateParseString) {
CONVERT_ARG_CHECKED(JSArray, output, 1);
MaybeObject* maybe_result_array =
- output->EnsureCanContainNonSmiElements();
+ output->EnsureCanContainHeapObjectElements();
if (maybe_result_array->IsFailure()) return maybe_result_array;
RUNTIME_ASSERT(output->HasFastElements());
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index e05ca1725..4a6d0a7fa 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -31,6 +31,7 @@
#include "bootstrapper.h"
#include "compiler.h"
+#include "messages.h"
#include "scopeinfo.h"
#include "allocation-inl.h"
@@ -284,8 +285,25 @@ bool Scope::Analyze(CompilationInfo* info) {
}
#endif
+ if (FLAG_harmony_scoping) {
+ VariableProxy* proxy = scope->CheckAssignmentToConst();
+ if (proxy != NULL) {
+ // Found an assignment to const. Throw a syntax error.
+ MessageLocation location(info->script(),
+ proxy->position(),
+ proxy->position());
+ Isolate* isolate = info->isolate();
+ Factory* factory = isolate->factory();
+ Handle<JSArray> array = factory->NewJSArray(0);
+ Handle<Object> result =
+ factory->NewSyntaxError("harmony_const_assign", array);
+ isolate->Throw(*result, &location);
+ return false;
+ }
+ }
+
info->SetScope(scope);
- return true; // Can not fail.
+ return true;
}
@@ -554,6 +572,29 @@ Declaration* Scope::CheckConflictingVarDeclarations() {
}
+VariableProxy* Scope::CheckAssignmentToConst() {
+ // Check this scope.
+ if (is_extended_mode()) {
+ for (int i = 0; i < unresolved_.length(); i++) {
+ ASSERT(unresolved_[i]->var() != NULL);
+ if (unresolved_[i]->var()->is_const_mode() &&
+ unresolved_[i]->IsLValue()) {
+ return unresolved_[i];
+ }
+ }
+ }
+
+ // Check inner scopes.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ VariableProxy* proxy = inner_scopes_[i]->CheckAssignmentToConst();
+ if (proxy != NULL) return proxy;
+ }
+
+ // No assignments to const found.
+ return NULL;
+}
+
+
void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ZoneList<Variable*>* context_locals) {
ASSERT(stack_locals != NULL);
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 523a251fa..af0449e93 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -187,6 +187,11 @@ class Scope: public ZoneObject {
// scope over a let binding of the same name.
Declaration* CheckConflictingVarDeclarations();
+ // For harmony block scoping mode: Check if the scope has variable proxies
+ // that are used as lvalues and point to const variables. Assumes that scopes
+ // have been analyzed and variables been resolved.
+ VariableProxy* CheckAssignmentToConst();
+
// ---------------------------------------------------------------------------
// Scope-specific info.
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index 1be81dde3..a5712a0f5 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -1656,14 +1656,14 @@ void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
// field and a next pointer, we give it a filler map that gives it the
// correct size.
if (size_in_bytes > FreeSpace::kHeaderSize) {
- set_map_unsafe(heap->raw_unchecked_free_space_map());
+ set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
// Can't use FreeSpace::cast because it fails during deserialization.
FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
this_as_free_space->set_size(size_in_bytes);
} else if (size_in_bytes == kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_one_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
} else if (size_in_bytes == 2 * kPointerSize) {
- set_map_unsafe(heap->raw_unchecked_two_pointer_filler_map());
+ set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
} else {
UNREACHABLE();
}
diff --git a/deps/v8/src/store-buffer.cc b/deps/v8/src/store-buffer.cc
index 7c8b5f207..c0315f27a 100644
--- a/deps/v8/src/store-buffer.cc
+++ b/deps/v8/src/store-buffer.cc
@@ -41,6 +41,7 @@ StoreBuffer::StoreBuffer(Heap* heap)
old_start_(NULL),
old_limit_(NULL),
old_top_(NULL),
+ old_reserved_limit_(NULL),
old_buffer_is_sorted_(false),
old_buffer_is_filtered_(false),
during_gc_(false),
@@ -59,10 +60,25 @@ void StoreBuffer::Setup() {
reinterpret_cast<uintptr_t>(virtual_memory_->address());
start_ =
reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
- limit_ = start_ + (kStoreBufferSize / sizeof(*start_));
-
- old_top_ = old_start_ = new Address[kOldStoreBufferLength];
- old_limit_ = old_start_ + kOldStoreBufferLength;
+ limit_ = start_ + (kStoreBufferSize / kPointerSize);
+
+ old_virtual_memory_ =
+ new VirtualMemory(kOldStoreBufferLength * kPointerSize);
+ old_top_ = old_start_ =
+ reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // We don't know the exact alignment requirements of the OS, but the
+  // reservation is certainly at least 4K-aligned, so the low 12 bits of the
+  // address must be zero.
+ ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+ int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
+ ASSERT(initial_length > 0);
+ ASSERT(initial_length <= kOldStoreBufferLength);
+ old_limit_ = old_start_ + initial_length;
+ old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+ CHECK(old_virtual_memory_->Commit(
+ reinterpret_cast<void*>(old_start_),
+ (old_limit_ - old_start_) * kPointerSize,
+ false));
ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
@@ -76,9 +92,9 @@ void StoreBuffer::Setup() {
ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
0);
- virtual_memory_->Commit(reinterpret_cast<Address>(start_),
- kStoreBufferSize,
- false); // Not executable.
+ CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+ kStoreBufferSize,
+ false)); // Not executable.
heap_->public_set_store_buffer_top(start_);
hash_map_1_ = new uintptr_t[kHashMapLength];
@@ -90,10 +106,10 @@ void StoreBuffer::Setup() {
void StoreBuffer::TearDown() {
delete virtual_memory_;
+ delete old_virtual_memory_;
delete[] hash_map_1_;
delete[] hash_map_2_;
- delete[] old_start_;
- old_start_ = old_top_ = old_limit_ = NULL;
+ old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
start_ = limit_ = NULL;
heap_->public_set_store_buffer_top(start_);
}
@@ -150,7 +166,18 @@ void StoreBuffer::Uniq() {
}
-void StoreBuffer::HandleFullness() {
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+ while (old_limit_ - old_top_ < space_needed &&
+ old_limit_ < old_reserved_limit_) {
+ size_t grow = old_limit_ - old_start_; // Double size.
+ CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+ grow * kPointerSize,
+ false));
+ old_limit_ += grow;
+ }
+
+ if (old_limit_ - old_top_ >= space_needed) return;
+
if (old_buffer_is_filtered_) return;
ASSERT(may_move_store_buffer_entries_);
Compact();
@@ -645,9 +672,7 @@ void StoreBuffer::Compact() {
// the worst case (compaction doesn't eliminate any pointers).
ASSERT(top <= limit_);
heap_->public_set_store_buffer_top(start_);
- if (top - start_ > old_limit_ - old_top_) {
- HandleFullness();
- }
+ EnsureSpace(top - start_);
ASSERT(may_move_store_buffer_entries_);
// Goes through the addresses in the store buffer attempting to remove
// duplicates. In the interest of speed this is a lossy operation. Some
@@ -688,9 +713,7 @@ void StoreBuffer::Compact() {
void StoreBuffer::CheckForFullBuffer() {
- if (old_limit_ - old_top_ < kStoreBufferSize * 2) {
- HandleFullness();
- }
+ EnsureSpace(kStoreBufferSize * 2);
}
} } // namespace v8::internal
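
The new scheme reserves the old buffer's maximum size up front but commits only one OS page, doubling the committed prefix on demand; only once the reservation is exhausted does it fall back to filtering and compaction. The growth loop in isolation, with Commit() standing in for VirtualMemory::Commit:

    #include <cassert>
    #include <cstddef>

    static const size_t kPageSize = 4096;
    static const size_t kReserved = 1 << 20;   // reserved, not yet committed
    static size_t committed = kPageSize;       // initial commit: one page

    static bool Commit(size_t offset, size_t size) {
      (void)offset; (void)size;  // a real version would commit these pages
      return true;
    }

    static void EnsureSpace(size_t used, size_t space_needed) {
      while (committed - used < space_needed && committed < kReserved) {
        size_t grow = committed;  // double the committed prefix
        if (!Commit(committed, grow)) return;
        committed += grow;
      }
      // If still short here, the real code falls back to Compact()/filtering.
    }

    int main() {
      EnsureSpace(3000, 10000);
      assert(committed == 16384);  // 4K -> 8K -> 16K
      return 0;
    }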
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
index e5e50aeb7..ab2593808 100644
--- a/deps/v8/src/store-buffer.h
+++ b/deps/v8/src/store-buffer.h
@@ -109,7 +109,7 @@ class StoreBuffer {
// been promoted. Rebuilds the store buffer completely if it overflowed.
void SortUniq();
- void HandleFullness();
+ void EnsureSpace(intptr_t space_needed);
void Verify();
bool PrepareForIteration();
@@ -134,6 +134,8 @@ class StoreBuffer {
Address* old_start_;
Address* old_limit_;
Address* old_top_;
+ Address* old_reserved_limit_;
+ VirtualMemory* old_virtual_memory_;
bool old_buffer_is_sorted_;
bool old_buffer_is_filtered_;
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 8b6e28f9f..0d0105c66 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -184,7 +184,7 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, CONSTANT_FUNCTION);
@@ -266,7 +266,7 @@ Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, CONSTANT_FUNCTION);
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index cc42e05fa..f55a36df3 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -92,7 +92,7 @@ class StubCache {
Handle<Code> ComputeLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> ComputeLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@@ -121,7 +121,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadConstant(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> ComputeKeyedLoadInterceptor(Handle<String> name,
Handle<JSObject> receiver,
@@ -518,7 +518,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss);
@@ -568,7 +568,7 @@ class LoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
@@ -603,7 +603,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadConstant(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value);
+ Handle<JSFunction> value);
Handle<Code> CompileLoadInterceptor(Handle<JSObject> object,
Handle<JSObject> holder,
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index c781c615a..e722d1452 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -259,6 +259,7 @@ TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
case CompareIC::STRINGS:
return TypeInfo::String();
case CompareIC::OBJECTS:
+ case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
@@ -278,6 +279,19 @@ bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
}
+Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->id());
+ if (!object->IsCode()) return Handle<Map>::null();
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_compare_ic_stub()) return Handle<Map>::null();
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ if (state != CompareIC::KNOWN_OBJECTS) {
+ return Handle<Map>::null();
+ }
+ return Handle<Map>(code->FindFirstMap());
+}
+
+
TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
Handle<Object> object = GetInfo(expr->id());
TypeInfo unknown = TypeInfo::Unknown();
@@ -367,6 +381,7 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
case CompareIC::OBJECTS:
+ case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 7c9c05ef0..eba098737 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -273,6 +273,7 @@ class TypeFeedbackOracle BASE_EMBEDDED {
TypeInfo BinaryType(BinaryOperation* expr);
TypeInfo CompareType(CompareOperation* expr);
bool IsSymbolCompare(CompareOperation* expr);
+ Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index 11b1a7eeb..1d54e28e9 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -660,6 +660,21 @@ function GetOwnProperty(obj, v) {
}
+// ES5 section 8.12.7.
+function Delete(obj, p, should_throw) {
+ var desc = GetOwnProperty(obj, p);
+ if (IS_UNDEFINED(desc)) return true;
+ if (desc.isConfigurable()) {
+ %DeleteProperty(obj, p, 0);
+ return true;
+ } else if (should_throw) {
+ throw MakeTypeError("define_disallowed", [p]);
+ } else {
+ return;
+ }
+}
+
+
// Harmony proxies.
function DefineProxyProperty(obj, p, attributes, should_throw) {
var handler = %GetHandler(obj);
@@ -677,12 +692,7 @@ function DefineProxyProperty(obj, p, attributes, should_throw) {
// ES5 8.12.9.
-function DefineOwnProperty(obj, p, desc, should_throw) {
- if (%IsJSProxy(obj)) {
- var attributes = FromGenericPropertyDescriptor(desc);
- return DefineProxyProperty(obj, p, attributes, should_throw);
- }
-
+function DefineObjectProperty(obj, p, desc, should_throw) {
var current_or_access = %GetOwnProperty(ToObject(obj), ToString(p));
// A false value here means that access checks failed.
if (current_or_access === false) return void 0;
@@ -846,6 +856,90 @@ function DefineOwnProperty(obj, p, desc, should_throw) {
}
+// ES5 section 15.4.5.1.
+function DefineArrayProperty(obj, p, desc, should_throw) {
+ // Note that the length of an array is not actually stored as part of the
+ // property, hence we use generated code throughout this function instead of
+ // DefineObjectProperty() to modify its value.
+
+ // Step 3 - Special handling for length property.
+ if (p == "length") {
+ var length = obj.length;
+ if (!desc.hasValue()) {
+ return DefineObjectProperty(obj, "length", desc, should_throw);
+ }
+ var new_length = ToUint32(desc.getValue());
+ if (new_length != ToNumber(desc.getValue())) {
+ throw new $RangeError('defineProperty() array length out of range');
+ }
+ var length_desc = GetOwnProperty(obj, "length");
+ if (new_length != length && !length_desc.isWritable()) {
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ var threw = false;
+ while (new_length < length--) {
+ if (!Delete(obj, ToString(length), false)) {
+ new_length = length + 1;
+ threw = true;
+ break;
+ }
+ }
+ // Make sure the below call to DefineObjectProperty() doesn't overwrite
+ // any magic "length" property by removing the value.
+ obj.length = new_length;
+ desc.value_ = void 0;
+ desc.hasValue_ = false;
+ if (!DefineObjectProperty(obj, "length", desc, should_throw) || threw) {
+ if (should_throw) {
+ throw MakeTypeError("redefine_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ return true;
+ }
+
+ // Step 4 - Special handling for array index.
+ var index = ToUint32(p);
+ if (index == ToNumber(p) && index != 4294967295) {
+ var length = obj.length;
+ var length_desc = GetOwnProperty(obj, "length");
+ if ((index >= length && !length_desc.isWritable()) ||
+ !DefineObjectProperty(obj, p, desc, true)) {
+ if (should_throw) {
+ throw MakeTypeError("define_disallowed", [p]);
+ } else {
+ return false;
+ }
+ }
+ if (index >= length) {
+ obj.length = index + 1;
+ }
+ return true;
+ }
+
+ // Step 5 - Fallback to default implementation.
+ return DefineObjectProperty(obj, p, desc, should_throw);
+}
+
+
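
The length-shrinking loop above deletes elements from the old length down toward the new one and stops at the first element that refuses deletion, whose index plus one becomes the effective length. The same loop run against a plain bitset of "configurable" flags, as an illustrative C++ sketch of the JavaScript above:

    #include <cassert>
    #include <vector>

    // Each entry: true if the element is configurable (deletable). Returns
    // the length actually reached, stopping early at the first element that
    // cannot be deleted, mirroring DefineArrayProperty's while loop.
    static unsigned ShrinkLength(std::vector<bool>* configurable,
                                 unsigned new_length) {
      unsigned length = static_cast<unsigned>(configurable->size());
      while (new_length < length--) {
        if (!(*configurable)[length]) return length + 1;  // "threw": stop
        configurable->pop_back();
      }
      return new_length;
    }

    int main() {
      std::vector<bool> a(5, true);
      a[2] = false;  // element 2 is non-configurable
      assert(ShrinkLength(&a, 0) == 3);  // deletion stops at index 2
      return 0;
    }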
+// ES5 section 8.12.9, ES5 section 15.4.5.1 and Harmony proxies.
+function DefineOwnProperty(obj, p, desc, should_throw) {
+ if (%IsJSProxy(obj)) {
+ var attributes = FromGenericPropertyDescriptor(desc);
+ return DefineProxyProperty(obj, p, attributes, should_throw);
+ } else if (IS_ARRAY(obj)) {
+ return DefineArrayProperty(obj, p, desc, should_throw);
+ } else {
+ return DefineObjectProperty(obj, p, desc, should_throw);
+ }
+}
+
+
// ES5 section 15.2.3.2.
function ObjectGetPrototypeOf(obj) {
if (!IS_SPEC_OBJECT(obj)) {
diff --git a/deps/v8/src/v8threads.h b/deps/v8/src/v8threads.h
index 4002bb36c..a2aee4e33 100644
--- a/deps/v8/src/v8threads.h
+++ b/deps/v8/src/v8threads.h
@@ -72,7 +72,7 @@ class ThreadState {
};
-// Defined in top.h
+// Defined in isolate.h.
class ThreadLocalTop;
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 3352735dc..8385bd776 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 7
-#define BUILD_NUMBER 12
+#define MINOR_VERSION 8
+#define BUILD_NUMBER 0
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index d578bf9c5..ca3bece5f 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -426,13 +426,7 @@ void Assembler::GetCode(CodeDesc* desc) {
void Assembler::Align(int m) {
ASSERT(IsPowerOf2(m));
int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
- while (delta >= 9) {
- nop(9);
- delta -= 9;
- }
- if (delta > 0) {
- nop(delta);
- }
+ Nop(delta);
}
@@ -441,6 +435,15 @@ void Assembler::CodeTargetAlign() {
}
+bool Assembler::IsNop(Address addr) {
+ Address a = addr;
+ while (*a == 0x66) a++;
+ if (*a == 0x90) return true;
+ if (a[0] == 0xf && a[1] == 0x1f) return true;
+ return false;
+}
+
+
void Assembler::bind_to(Label* L, int pos) {
ASSERT(!L->is_bound()); // Label may only be bound once.
ASSERT(0 <= pos && pos <= pc_offset()); // Position must be valid.
@@ -1763,7 +1766,7 @@ void Assembler::notl(Register dst) {
}
-void Assembler::nop(int n) {
+void Assembler::Nop(int n) {
// The recommended multi-byte sequences of NOP instructions from the Intel 64
// and IA-32 Architectures Software Developer's Manual.
//
@@ -1778,73 +1781,64 @@ void Assembler::nop(int n) {
// 9 bytes 66 NOP DWORD ptr [EAX + EAX*1 + 66 0F 1F 84 00 00 00 00
// 00000000H] 00H
- ASSERT(1 <= n);
- ASSERT(n <= 9);
EnsureSpace ensure_space(this);
- switch (n) {
- case 1:
- emit(0x90);
- return;
- case 2:
- emit(0x66);
- emit(0x90);
- return;
- case 3:
- emit(0x0f);
- emit(0x1f);
- emit(0x00);
- return;
- case 4:
- emit(0x0f);
- emit(0x1f);
- emit(0x40);
- emit(0x00);
- return;
- case 5:
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 6:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x44);
- emit(0x00);
- emit(0x00);
- return;
- case 7:
- emit(0x0f);
- emit(0x1f);
- emit(0x80);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 8:
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
- case 9:
- emit(0x66);
- emit(0x0f);
- emit(0x1f);
- emit(0x84);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- emit(0x00);
- return;
+ while (n > 0) {
+ switch (n) {
+ case 2:
+ emit(0x66);
+ case 1:
+ emit(0x90);
+ return;
+ case 3:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x00);
+ return;
+ case 4:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x40);
+ emit(0x00);
+ return;
+ case 6:
+ emit(0x66);
+ case 5:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x44);
+ emit(0x00);
+ emit(0x00);
+ return;
+ case 7:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x80);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ return;
+ default:
+ case 11:
+ emit(0x66);
+ n--;
+ case 10:
+ emit(0x66);
+ n--;
+ case 9:
+ emit(0x66);
+ n--;
+ case 8:
+ emit(0x0f);
+ emit(0x1f);
+ emit(0x84);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ emit(0x00);
+ n -= 8;
+ }
}
}
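
The rewritten Nop(int n) replaces the bounded one-to-nine-byte emitter with a greedy loop: chunks of up to 11 bytes (the 8-byte NOP form carrying up to three 0x66 prefixes), then a single short form from the table. A minimal stand-alone sketch of the same decomposition, assuming a plain byte buffer in place of the assembler's emit():

#include <cstdint>
#include <vector>

// Greedy multi-byte NOP emitter mirroring the fall-through switch above.
// Sequences follow the Intel SDM recommendations quoted in the comment.
void EmitNops(std::vector<uint8_t>* out, int n) {
  static const uint8_t kShortForms[8][7] = {
      {},                                          // 0: unused
      {0x90},                                      // 1: NOP
      {0x66, 0x90},                                // 2: 66 NOP
      {0x0F, 0x1F, 0x00},                          // 3: NOP DWORD ptr [EAX]
      {0x0F, 0x1F, 0x40, 0x00},                    // 4: NOP DWORD ptr [EAX+0]
      {0x0F, 0x1F, 0x44, 0x00, 0x00},              // 5: NOP DWORD ptr [EAX+EAX*1+0]
      {0x66, 0x0F, 0x1F, 0x44, 0x00, 0x00},        // 6: 66-prefixed form of 5
      {0x0F, 0x1F, 0x80, 0x00, 0x00, 0x00, 0x00},  // 7: NOP DWORD ptr [EAX+imm32]
  };
  while (n > 0) {
    if (n <= 7) {
      out->insert(out->end(), kShortForms[n], kShortForms[n] + n);
      return;
    }
    // 8..11 bytes: pad the 8-byte form with 0x66 prefixes. For n > 11, emit
    // an 11-byte chunk and loop, exactly like the default: case above.
    int prefixes = n >= 11 ? 3 : n - 8;
    for (int i = 0; i < prefixes; i++) out->push_back(0x66);
    const uint8_t kLongForm[8] = {0x0F, 0x1F, 0x84, 0x00,
                                  0x00, 0x00, 0x00, 0x00};
    out->insert(out->end(), kLongForm, kLongForm + 8);
    n -= prefixes + 8;
  }
}
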
@@ -2313,6 +2307,27 @@ void Assembler::fyl2x() {
}
+void Assembler::f2xm1() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xF0);
+}
+
+
+void Assembler::fscale() {
+ EnsureSpace ensure_space(this);
+ emit(0xD9);
+ emit(0xFD);
+}
+
+
+void Assembler::fninit() {
+ EnsureSpace ensure_space(this);
+ emit(0xDB);
+ emit(0xE3);
+}
+
+
void Assembler::fadd(int i) {
EnsureSpace ensure_space(this);
emit_farith(0xDC, 0xC0, i);
@@ -2572,7 +2587,8 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
- ASSERT(is_uint2(imm8));
+ ASSERT(CpuFeatures::IsSupported(SSE4_1));
+ ASSERT(is_uint8(imm8));
EnsureSpace ensure_space(this);
emit(0x66);
emit_optional_rex_32(dst, src);
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 1db5273b2..745850d82 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -636,6 +636,7 @@ class Assembler : public AssemblerBase {
// possible to align the pc offset to a multiple
// of m, where m must be a power of 2.
void Align(int m);
+ void Nop(int bytes = 1);
// Aligns code to a boundary that's optimal for a jump target on this platform.
void CodeTargetAlign();
@@ -1154,7 +1155,6 @@ class Assembler : public AssemblerBase {
void hlt();
void int3();
void nop();
- void nop(int n);
void rdtsc();
void ret(int imm16);
void setcc(Condition cc, Register reg);
@@ -1277,6 +1277,9 @@ class Assembler : public AssemblerBase {
void fcos();
void fptan();
void fyl2x();
+ void f2xm1();
+ void fscale();
+ void fninit();
void frndint();
@@ -1398,7 +1401,7 @@ class Assembler : public AssemblerBase {
return static_cast<int>(reloc_info_writer.pos() - pc_);
}
- static bool IsNop(Address addr) { return *addr == 0x90; }
+ static bool IsNop(Address addr);
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index e423ae3a4..1719496c8 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1305,6 +1305,9 @@ static void ArrayNativeCode(MacroAssembler* masm,
__ jmp(&entry);
__ bind(&loop);
__ movq(kScratchRegister, Operand(r9, rcx, times_pointer_size, 0));
+ if (FLAG_smi_only_arrays) {
+ __ JumpIfNotSmi(kScratchRegister, call_generic_code);
+ }
__ movq(Operand(rdx, 0), kScratchRegister);
__ addq(rdx, Immediate(kPointerSize));
__ bind(&entry);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 96f70bfa9..6f3e0659f 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -1991,152 +1991,259 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
void MathPowStub::Generate(MacroAssembler* masm) {
- // Registers are used as follows:
- // rdx = base
- // rax = exponent
- // rcx = temporary, result
-
- Label allocate_return, call_runtime;
-
- // Load input parameters.
- __ movq(rdx, Operand(rsp, 2 * kPointerSize));
- __ movq(rax, Operand(rsp, 1 * kPointerSize));
+ // Choose register conforming to calling convention (when bailing out).
+#ifdef _WIN64
+ const Register exponent = rdx;
+#else
+ const Register exponent = rdi;
+#endif
+ const Register base = rax;
+ const Register scratch = rcx;
+ const XMMRegister double_result = xmm3;
+ const XMMRegister double_base = xmm2;
+ const XMMRegister double_exponent = xmm1;
+ const XMMRegister double_scratch = xmm4;
- // Save 1 in xmm3 - we need this several times later on.
- __ Set(rcx, 1);
- __ cvtlsi2sd(xmm3, rcx);
+ Label call_runtime, done, exponent_not_smi, int_exponent;
- Label exponent_nonsmi;
- Label base_nonsmi;
- // If the exponent is a heap number go to that specific case.
- __ JumpIfNotSmi(rax, &exponent_nonsmi);
- __ JumpIfNotSmi(rdx, &base_nonsmi);
+ // Save 1 in double_result - we need this several times later on.
+ __ movq(scratch, Immediate(1));
+ __ cvtlsi2sd(double_result, scratch);
+
+ if (exponent_type_ == ON_STACK) {
+ Label base_is_smi, unpack_exponent;
+ // The exponent and base are supplied as arguments on the stack.
+ // This can only happen if the stub is called from non-optimized code.
+ // Load input parameters from stack.
+ __ movq(base, Operand(rsp, 2 * kPointerSize));
+ __ movq(exponent, Operand(rsp, 1 * kPointerSize));
+ __ JumpIfSmi(base, &base_is_smi, Label::kNear);
+ __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+
+ __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset));
+ __ jmp(&unpack_exponent, Label::kNear);
+
+ __ bind(&base_is_smi);
+ __ SmiToInteger32(base, base);
+ __ cvtlsi2sd(double_base, base);
+ __ bind(&unpack_exponent);
+
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ CompareRoot(FieldOperand(exponent, HeapObject::kMapOffset),
+ Heap::kHeapNumberMapRootIndex);
+ __ j(not_equal, &call_runtime);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ } else if (exponent_type_ == TAGGED) {
+ __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
+ __ SmiToInteger32(exponent, exponent);
+ __ jmp(&int_exponent);
+
+ __ bind(&exponent_not_smi);
+ __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
+ }
- // Optimized version when both exponent and base are smis.
- Label powi;
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&powi);
- // Exponent is a smi and base is a heapnumber.
- __ bind(&base_nonsmi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
+ if (exponent_type_ != INTEGER) {
+ Label fast_power;
+ // Detect integer exponents stored as double.
+ __ cvttsd2si(exponent, double_exponent);
+ // Skip to runtime if possibly NaN (indicated by the indefinite integer).
+ __ cmpl(exponent, Immediate(0x80000000u));
+ __ j(equal, &call_runtime);
+ __ cvtlsi2sd(double_scratch, exponent);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_exponent, double_scratch);
+ __ j(equal, &int_exponent);
+
+ if (exponent_type_ == ON_STACK) {
+ // Detect square root case. Crankshaft detects constant +/-0.5 at
+ // compile time and uses DoMathPowHalf instead. We then skip this check
+ // for non-constant cases of +/-0.5 as these hardly occur.
+ Label continue_sqrt, continue_rsqrt, not_plus_half;
+ // Test for 0.5.
+ // Load double_scratch with 0.5.
+ __ movq(scratch, V8_UINT64_C(0x3FE0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &not_plus_half, Label::kNear);
+
+ // Calculates square root of base. Check for the special case of
+ // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_sqrt, Label::kNear);
+ __ j(carry, &continue_sqrt, Label::kNear);
+
+ // Set result to Infinity in the special case.
+ __ xorps(double_result, double_result);
+ __ subsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ __ bind(&continue_sqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_scratch, double_scratch);
+ __ addsd(double_scratch, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_result, double_scratch);
+ __ jmp(&done);
+
+ // Test for -0.5.
+ __ bind(&not_plus_half);
+ // Load double_scratch with -0.5 by subtracting 1.
+ __ subsd(double_scratch, double_result);
+ // Already ruled out NaNs for exponent.
+ __ ucomisd(double_scratch, double_exponent);
+ __ j(not_equal, &fast_power, Label::kNear);
+
+ // Calculates reciprocal of square root of base. Check for the special
+ // case of Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+ // According to IEEE-754, double-precision -Infinity has the highest
+ // 12 bits set and the lowest 52 bits cleared.
+ __ movq(scratch, V8_UINT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(double_scratch, scratch);
+ __ ucomisd(double_scratch, double_base);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &continue_rsqrt, Label::kNear);
+ __ j(carry, &continue_rsqrt, Label::kNear);
+
+ // Set result to 0 in the special case.
+ __ xorps(double_result, double_result);
+ __ jmp(&done);
+
+ __ bind(&continue_rsqrt);
+ // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
+ __ xorps(double_exponent, double_exponent);
+ __ addsd(double_exponent, double_base); // Convert -0 to +0.
+ __ sqrtsd(double_exponent, double_exponent);
+ __ divsd(double_result, double_exponent);
+ __ jmp(&done);
+ }
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // Using FPU instructions to calculate power.
+ Label fast_power_failed;
+ __ bind(&fast_power);
+ __ fnclex(); // Clear flags to catch exceptions later.
+ // Transfer (B)ase and (E)xponent onto the FPU register stack.
+ __ subq(rsp, Immediate(kDoubleSize));
+ __ movsd(Operand(rsp, 0), double_exponent);
+ __ fld_d(Operand(rsp, 0)); // E
+ __ movsd(Operand(rsp, 0), double_base);
+ __ fld_d(Operand(rsp, 0)); // B, E
+
+ // Exponent is in st(1) and base is in st(0)
+ // B ^ E = (2^(E * log2(B)) - 1) + 1 = (2^X - 1) + 1 for X = E * log2(B)
+ // FYL2X calculates st(1) * log2(st(0))
+ __ fyl2x(); // X
+ __ fld(0); // X, X
+ __ frndint(); // rnd(X), X
+ __ fsub(1); // rnd(X), X-rnd(X)
+ __ fxch(1); // X - rnd(X), rnd(X)
+ // F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
+ __ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
+ __ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
+ // FSCALE calculates st(0) * 2^st(1)
+ __ fscale(); // 2^X, rnd(X)
+ __ fstp(1);
+ // Bail out to runtime in case of exceptions in the status word.
+ __ fnstsw_ax();
+ __ testb(rax, Immediate(0x5F)); // Check for all exceptions but precision.
+ __ j(not_zero, &fast_power_failed, Label::kNear);
+ __ fstp_d(Operand(rsp, 0));
+ __ movsd(double_result, Operand(rsp, 0));
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&done);
- // Optimized version of pow if exponent is a smi.
- // xmm0 contains the base.
- __ bind(&powi);
- __ SmiToInteger32(rax, rax);
+ __ bind(&fast_power_failed);
+ __ fninit();
+ __ addq(rsp, Immediate(kDoubleSize));
+ __ jmp(&call_runtime);
+ }
- // Save exponent in base as we need to check if exponent is negative later.
- // We know that base and exponent are in different registers.
- __ movq(rdx, rax);
+ // Calculate power with integer exponent.
+ __ bind(&int_exponent);
+ const XMMRegister double_scratch2 = double_exponent;
+ // Back up exponent as we need to check if exponent is negative later.
+ __ movq(scratch, exponent); // Back up exponent.
+ __ movsd(double_scratch, double_base); // Back up base.
+ __ movsd(double_scratch2, double_result); // Load double_exponent with 1.
// Get absolute value of exponent.
- Label no_neg;
- __ cmpl(rax, Immediate(0));
- __ j(greater_equal, &no_neg, Label::kNear);
- __ negl(rax);
+ Label no_neg, while_true, no_multiply;
+ __ testl(scratch, scratch);
+ __ j(positive, &no_neg, Label::kNear);
+ __ negl(scratch);
__ bind(&no_neg);
- // Load xmm1 with 1.
- __ movaps(xmm1, xmm3);
- Label while_true;
- Label no_multiply;
-
__ bind(&while_true);
- __ shrl(rax, Immediate(1));
+ __ shrl(scratch, Immediate(1));
__ j(not_carry, &no_multiply, Label::kNear);
- __ mulsd(xmm1, xmm0);
+ __ mulsd(double_result, double_scratch);
__ bind(&no_multiply);
- __ mulsd(xmm0, xmm0);
- __ j(not_zero, &while_true);
-
- // Base has the original value of the exponent - if the exponent is
- // negative return 1/result.
- __ testl(rdx, rdx);
- __ j(positive, &allocate_return);
- // Special case if xmm1 has reached infinity.
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ xorps(xmm0, xmm0);
- __ ucomisd(xmm0, xmm1);
- __ j(equal, &call_runtime);
-
- __ jmp(&allocate_return);
-
- // Exponent (or both) is a heapnumber - no matter what we should now work
- // on doubles.
- __ bind(&exponent_nonsmi);
- __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- // Test if exponent is nan.
- __ ucomisd(xmm1, xmm1);
- __ j(parity_even, &call_runtime);
- Label base_not_smi, handle_special_cases;
- __ JumpIfNotSmi(rdx, &base_not_smi, Label::kNear);
- __ SmiToInteger32(rdx, rdx);
- __ cvtlsi2sd(xmm0, rdx);
- __ jmp(&handle_special_cases, Label::kNear);
+ __ mulsd(double_scratch, double_scratch);
+ __ j(not_zero, &while_true);
- __ bind(&base_not_smi);
- __ CompareRoot(FieldOperand(rdx, HeapObject::kMapOffset),
- Heap::kHeapNumberMapRootIndex);
- __ j(not_equal, &call_runtime);
- __ movl(rcx, FieldOperand(rdx, HeapNumber::kExponentOffset));
- __ andl(rcx, Immediate(HeapNumber::kExponentMask));
- __ cmpl(rcx, Immediate(HeapNumber::kExponentMask));
- // base is NaN or +/-Infinity
- __ j(greater_equal, &call_runtime);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ // If the exponent is negative, return 1/result.
+ __ testl(exponent, exponent);
+ __ j(greater, &done);
+ __ divsd(double_scratch2, double_result);
+ __ movsd(double_result, double_scratch2);
+ // Test whether result is zero. Bail out to check for subnormal result.
+ // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+ __ xorps(double_scratch2, double_scratch2);
+ __ ucomisd(double_scratch2, double_result);
+ // double_exponent, aliased as double_scratch2, has already been overwritten
+ // and may not have contained the exponent value in the first place when the
+ // input was a smi. We reset it with the exponent value before bailing out.
+ __ j(not_equal, &done);
+ __ cvtlsi2sd(double_exponent, exponent);
+
+ // Returning or bailing out.
+ Counters* counters = masm->isolate()->counters();
+ if (exponent_type_ == ON_STACK) {
+ // The arguments are still on the stack.
+ __ bind(&call_runtime);
+ __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
- // base is in xmm0 and exponent is in xmm1.
- __ bind(&handle_special_cases);
- Label not_minus_half;
- // Test for -0.5.
- // Load xmm2 with -0.5.
- __ movq(rcx, V8_UINT64_C(0xBFE0000000000000), RelocInfo::NONE);
- __ movq(xmm2, rcx);
- // xmm2 now has -0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &not_minus_half, Label::kNear);
-
- // Calculates reciprocal of square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0);
- __ sqrtsd(xmm1, xmm1);
- __ divsd(xmm3, xmm1);
- __ movaps(xmm1, xmm3);
- __ jmp(&allocate_return);
-
- // Test for 0.5.
- __ bind(&not_minus_half);
- // Load xmm2 with 0.5.
- // Since xmm3 is 1 and xmm2 is -0.5 this is simply xmm2 + xmm3.
- __ addsd(xmm2, xmm3);
- // xmm2 now has 0.5.
- __ ucomisd(xmm2, xmm1);
- __ j(not_equal, &call_runtime);
- // Calculates square root.
- // sqrtsd returns -0 when input is -0. ECMA spec requires +0.
- __ xorps(xmm1, xmm1);
- __ addsd(xmm1, xmm0); // Convert -0 to 0.
- __ sqrtsd(xmm1, xmm1);
-
- __ bind(&allocate_return);
- __ AllocateHeapNumber(rcx, rax, &call_runtime);
- __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm1);
- __ movq(rax, rcx);
- __ ret(2 * kPointerSize);
+ // The stub is called from non-optimized code, which expects the result
+ // as a heap number in rax.
+ __ bind(&done);
+ __ AllocateHeapNumber(rax, rcx, &call_runtime);
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), double_result);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(2 * kPointerSize);
+ } else {
+ __ bind(&call_runtime);
+ // Move base to the correct argument register. Exponent is already in xmm1.
+ __ movsd(xmm0, double_base);
+ ASSERT(double_exponent.is(xmm1));
+ {
+ AllowExternalCallThatCantCauseGC scope(masm);
+ __ PrepareCallCFunction(2);
+ __ CallCFunction(
+ ExternalReference::power_double_double_function(masm->isolate()), 2);
+ }
+ // Return value is in xmm0.
+ __ movsd(double_result, xmm0);
+ // Restore context register.
+ __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
- __ bind(&call_runtime);
- __ TailCallRuntime(Runtime::kMath_pow_cfunction, 2, 1);
+ __ bind(&done);
+ __ IncrementCounter(counters->math_pow(), 1);
+ __ ret(0);
+ }
}
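
In outline, the rewritten stub does three things: it special-cases exponents of +/-0.5 (ES5 15.8.2.13 pins down Math.pow(-Infinity, +/-0.5)), computes integral exponents by square-and-multiply, and otherwise evaluates 2^(E * log2(B)) on the FPU with a runtime fallback. A hedged C++ sketch of that case analysis only, with std::pow standing in for both the FPU fast path and the runtime call:

#include <cmath>
#include <cstdint>
#include <limits>

// Sketch of the stub's control flow, not its code generation.
double MathPowSketch(double base, double exponent) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (exponent == 0.5) {
    // Math.pow(-Infinity, 0.5) == Infinity, although sqrt(-Infinity) is NaN.
    if (base == -kInf) return kInf;
    return std::sqrt(base + 0.0);  // +0.0 turns -0 into +0, as the stub does
  }
  if (exponent == -0.5) {
    // Math.pow(-Infinity, -0.5) == 0.
    if (base == -kInf) return 0.0;
    return 1.0 / std::sqrt(base + 0.0);
  }
  // Integral exponents; NaN and out-of-range values fail both tests.
  if (std::trunc(exponent) == exponent &&
      std::fabs(exponent) <= 2147483647.0) {
    int32_t n = static_cast<int32_t>(exponent);
    uint32_t m =
        n < 0 ? 0u - static_cast<uint32_t>(n) : static_cast<uint32_t>(n);
    double result = 1.0;
    double scratch = base;
    // Square-and-multiply over the exponent bits, like the shrl/mulsd loop.
    while (m != 0) {
      if (m & 1) result *= scratch;
      scratch *= scratch;
      m >>= 1;
    }
    return n < 0 ? 1.0 / result : result;
  }
  // FPU fast path (2^(E * log2(B))) or runtime call; std::pow stands in.
  return std::pow(base, exponent);
}
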
@@ -5501,32 +5608,45 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
}
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
- // Save the registers.
- __ pop(rcx);
- __ push(rdx);
- __ push(rax);
- __ push(rcx);
+void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+ Label miss;
+ Condition either_smi = masm->CheckEitherSmi(rdx, rax);
+ __ j(either_smi, &miss, Label::kNear);
- // Call the runtime system in a fresh internal frame.
- ExternalReference miss =
- ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+ __ movq(rcx, FieldOperand(rax, HeapObject::kMapOffset));
+ __ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
+ __ Cmp(rcx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+ __ Cmp(rbx, known_map_);
+ __ j(not_equal, &miss, Label::kNear);
+
+ __ subq(rax, rdx);
+ __ ret(0);
+
+ __ bind(&miss);
+ GenerateMiss(masm);
+}
+
+
+void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
{
+ // Call the runtime system in a fresh internal frame.
+ ExternalReference miss =
+ ExternalReference(IC_Utility(IC::kCompareIC_Miss), masm->isolate());
+
FrameScope scope(masm, StackFrame::INTERNAL);
__ push(rdx);
__ push(rax);
+ __ push(rdx);
+ __ push(rax);
__ Push(Smi::FromInt(op_));
__ CallExternalReference(miss, 3);
- }
- // Compute the entry point of the rewritten stub.
- __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
-
- // Restore registers.
- __ pop(rcx);
- __ pop(rax);
- __ pop(rdx);
- __ push(rcx);
+ // Compute the entry point of the rewritten stub.
+ __ lea(rdi, FieldOperand(rax, Code::kHeaderSize));
+ __ pop(rax);
+ __ pop(rdx);
+ }
// Do a tail call to the rewritten stub.
__ jmp(rdi);
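
GenerateKnownObjects exploits the fact that two heap objects carrying the known map are equal exactly when they are the same object, so the stub can return rax - rdx directly; zero means equal under the compare-IC contract. A sketch of that identity check on hypothetical raw pointers:

#include <cstdint>

// For objects of the same known map, equality is pointer identity; the
// difference is the stub's return value, where zero means "equal".
intptr_t CompareKnownObjects(const void* lhs, const void* rhs) {
  return reinterpret_cast<intptr_t>(lhs) - reinterpret_cast<intptr_t>(rhs);
}
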
diff --git a/deps/v8/src/x64/debug-x64.cc b/deps/v8/src/x64/debug-x64.cc
index 339b961fe..f84772eb9 100644
--- a/deps/v8/src/x64/debug-x64.cc
+++ b/deps/v8/src/x64/debug-x64.cc
@@ -264,9 +264,7 @@ void Debug::GenerateSlot(MacroAssembler* masm) {
Label check_codesize;
__ bind(&check_codesize);
__ RecordDebugBreakSlot();
- for (int i = 0; i < Assembler::kDebugBreakSlotLength; i++) {
- __ nop();
- }
+ __ Nop(Assembler::kDebugBreakSlotLength);
ASSERT_EQ(Assembler::kDebugBreakSlotLength,
masm->SizeOfCodeGeneratedSince(&check_codesize));
}
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 1fd78fc57..d684ad713 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -138,8 +138,8 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
ASSERT(*(call_target_address - 3) == 0x73 && // jae
*(call_target_address - 2) == 0x07 && // offset
*(call_target_address - 1) == 0xe8); // call
- *(call_target_address - 3) = 0x90; // nop
- *(call_target_address - 2) = 0x90; // nop
+ *(call_target_address - 3) = 0x66; // 2 byte nop part 1
+ *(call_target_address - 2) = 0x90; // 2 byte nop part 2
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -157,8 +157,8 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Assembler::target_address_at(call_target_address));
// Replace the nops inserted by patching (Deoptimizer::PatchStackCheckCode)
// to restore the conditional branch.
- ASSERT(*(call_target_address - 3) == 0x90 && // nop
- *(call_target_address - 2) == 0x90 && // nop
+ ASSERT(*(call_target_address - 3) == 0x66 && // 2 byte nop part 1
+ *(call_target_address - 2) == 0x90 && // 2 byte nop part 2
*(call_target_address - 1) == 0xe8); // call
*(call_target_address - 3) = 0x73; // jae
*(call_target_address - 2) = 0x07; // offset
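
The patch/revert pair now swaps the two-byte jae (0x73 0x07) for one two-byte NOP (0x66 0x90) rather than two one-byte NOPs, keeping the patched site a single instruction. A minimal sketch of the byte surgery, assuming writable access to the code bytes just before the call's target-address field:

#include <cassert>
#include <cstdint>

// p points at the call's target-address field; the three preceding bytes
// are "jae +7" (73 07) followed by the call opcode (e8).
void PatchStackCheck(uint8_t* p) {
  assert(p[-3] == 0x73 && p[-2] == 0x07 && p[-1] == 0xE8);
  p[-3] = 0x66;  // operand-size prefix: together with 0x90, one 2-byte nop
  p[-2] = 0x90;  // nop
}

void RevertStackCheck(uint8_t* p) {
  assert(p[-3] == 0x66 && p[-2] == 0x90 && p[-1] == 0xE8);
  p[-3] = 0x73;  // jae
  p[-2] = 0x07;  // offset
}
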
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index 1b8871fd4..5cbdad7ac 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -109,6 +109,7 @@ static const ByteMnemonic zero_operands_instr[] = {
{ 0xC3, UNSET_OP_ORDER, "ret" },
{ 0xC9, UNSET_OP_ORDER, "leave" },
{ 0xF4, UNSET_OP_ORDER, "hlt" },
+ { 0xFC, UNSET_OP_ORDER, "cld" },
{ 0xCC, UNSET_OP_ORDER, "int3" },
{ 0x60, UNSET_OP_ORDER, "pushad" },
{ 0x61, UNSET_OP_ORDER, "popad" },
@@ -910,15 +911,19 @@ int DisassemblerX64::RegisterFPUInstruction(int escape_opcode,
switch (modrm_byte) {
case 0xE0: mnem = "fchs"; break;
case 0xE1: mnem = "fabs"; break;
+ case 0xE3: mnem = "fninit"; break;
case 0xE4: mnem = "ftst"; break;
case 0xE8: mnem = "fld1"; break;
case 0xEB: mnem = "fldpi"; break;
case 0xED: mnem = "fldln2"; break;
case 0xEE: mnem = "fldz"; break;
+ case 0xF0: mnem = "f2xm1"; break;
case 0xF1: mnem = "fyl2x"; break;
+ case 0xF2: mnem = "fptan"; break;
case 0xF5: mnem = "fprem1"; break;
case 0xF7: mnem = "fincstp"; break;
case 0xF8: mnem = "fprem"; break;
+ case 0xFD: mnem = "fscale"; break;
case 0xFE: mnem = "fsin"; break;
case 0xFF: mnem = "fcos"; break;
default: UnimplementedInstruction();
@@ -1034,7 +1039,18 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
}
} else {
get_modrm(*current, &mod, &regop, &rm);
- if (opcode == 0x28) {
+ if (opcode == 0x1f) {
+ current++;
+ if (rm == 4) { // SIB byte present.
+ current++;
+ }
+ if (mod == 1) { // Byte displacement.
+ current += 1;
+ } else if (mod == 2) { // 32-bit displacement.
+ current += 4;
+ } // else no immediate displacement.
+ AppendToBuffer("nop");
+ } else if (opcode == 0x28) {
AppendToBuffer("movapd %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
} else if (opcode == 0x29) {
@@ -1178,7 +1194,7 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
int mod, regop, rm;
get_modrm(*current, &mod, &regop, &rm);
current++;
- if (regop == 4) { // SIB byte present.
+ if (rm == 4) { // SIB byte present.
current++;
}
if (mod == 1) { // Byte displacement.
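
The disassembler hunks decode the 0F 1F NOP by walking its ModRM operand, and the second hunk fixes the SIB test to inspect the rm field rather than regop. The operand length reduces to the small calculation below, a sketch that ignores encodings the recommended NOP forms never use:

#include <cstdint>

// Bytes occupied by the ModRM-encoded operand of a 0F 1F NOP: mod lives in
// bits 7..6, rm in bits 2..0. The recommended NOP forms never use mod == 3
// or the mod == 0 / rm == 5 (disp32) encoding, so those are ignored here,
// just as in the disassembler hunk above.
int NopOperandLength(uint8_t modrm) {
  int mod = modrm >> 6;
  int rm = modrm & 7;
  int length = 1;            // the ModRM byte itself
  if (rm == 4) length += 1;  // SIB byte present
  if (mod == 1) {
    length += 1;             // 8-bit displacement
  } else if (mod == 2) {
    length += 4;             // 32-bit displacement
  }
  return length;
}
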
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 963912f66..24df20ba7 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -2820,7 +2820,7 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- MathPowStub stub;
+ MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(rax);
}
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 3a577530d..b3a94227a 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1641,6 +1641,9 @@ void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
rewritten = stub.GetCode();
} else {
ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
rewritten = stub.GetCode();
}
set_target(*rewritten);
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index cbbe65f0c..293a1db61 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -49,7 +49,9 @@ class SafepointGenerator : public CallWrapper {
deopt_mode_(mode) { }
virtual ~SafepointGenerator() { }
- virtual void BeforeCall(int call_size) const { }
+ virtual void BeforeCall(int call_size) const {
+ codegen_->EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - call_size);
+ }
virtual void AfterCall() const {
codegen_->RecordSafepoint(pointers_, deopt_mode_);
@@ -241,7 +243,7 @@ bool LCodeGen::GenerateBody() {
instr->CompileToNative(this);
}
}
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
return !is_aborted();
}
@@ -439,6 +441,7 @@ void LCodeGen::CallCodeGeneric(Handle<Code> code,
LInstruction* instr,
SafepointMode safepoint_mode,
int argc) {
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size() - masm()->CallSize(code));
ASSERT(instr != NULL);
LPointerMap* pointers = instr->pointer_map();
RecordPosition(pointers->position());
@@ -1151,8 +1154,13 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
void LCodeGen::DoConstantT(LConstantT* instr) {
- ASSERT(instr->result()->IsRegister());
- __ Move(ToRegister(instr->result()), instr->value());
+ Handle<Object> value = instr->value();
+ if (value->IsSmi()) {
+ __ Move(ToRegister(instr->result()), value);
+ } else {
+ __ LoadHeapObject(ToRegister(instr->result()),
+ Handle<HeapObject>::cast(value));
+ }
}
@@ -1929,7 +1937,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
InstanceofStub stub(flags);
__ push(ToRegister(instr->InputAt(0)));
- __ Push(instr->function());
+ __ PushHeapObject(instr->function());
static const int kAdditionalDelta = 10;
int delta =
@@ -1999,13 +2007,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
Register result = ToRegister(instr->result());
- if (result.is(rax)) {
- __ load_rax(instr->hydrogen()->cell().location(),
- RelocInfo::GLOBAL_PROPERTY_CELL);
- } else {
- __ movq(result, instr->hydrogen()->cell(), RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- }
+ __ LoadGlobalCell(result, instr->hydrogen()->cell());
if (instr->hydrogen()->RequiresHoleCheck()) {
__ CompareRoot(result, Heap::kTheHoleValueRootIndex);
DeoptimizeIf(equal, instr->environment());
@@ -2045,25 +2047,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
// Store the value.
__ movq(Operand(address, 0), value);
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- Label smi_store;
- HType type = instr->hydrogen()->value()->type();
- if (!type.IsHeapNumber() && !type.IsString() && !type.IsNonPrimitive()) {
- __ JumpIfSmi(value, &smi_store, Label::kNear);
- }
-
- int offset = JSGlobalPropertyCell::kValueOffset - kHeapObjectTag;
- __ lea(object, Operand(address, -offset));
- // Cells are always in the remembered set.
- __ RecordWrite(object,
- address,
- value,
- kSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- __ bind(&smi_store);
- }
+ // Cells are always rescanned, so no write barrier here.
}
@@ -2083,13 +2067,22 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
__ movq(result, ContextOperand(context, instr->slot_index()));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
}
void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Register context = ToRegister(instr->context());
Register value = ToRegister(instr->value());
- __ movq(ContextOperand(context, instr->slot_index()), value);
+ Operand target = ContextOperand(context, instr->slot_index());
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
+ }
+ __ movq(target, value);
if (instr->hydrogen()->NeedsWriteBarrier()) {
HType type = instr->hydrogen()->value()->type();
SmiCheck check_needed =
@@ -2141,7 +2134,7 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
}
} else {
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
- LoadHeapObject(result, Handle<HeapObject>::cast(function));
+ __ LoadHeapObject(result, function);
}
}
@@ -2563,7 +2556,7 @@ void LCodeGen::DoPushArgument(LPushArgument* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- LoadHeapObject(result, instr->hydrogen()->closure());
+ __ LoadHeapObject(result, instr->hydrogen()->closure());
}
@@ -2634,7 +2627,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->function());
+ __ LoadHeapObject(rdi, instr->function());
CallKnownFunction(instr->function(),
instr->arity(),
instr,
@@ -2808,10 +2801,10 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
// This addition might give a result that isn't correct for
// rounding, due to loss of precision, but only for a number that's
// so big that the conversion below will overflow anyway.
- __ addsd(input_reg, xmm_scratch);
+ __ addsd(xmm_scratch, input_reg);
// Compute Math.floor(input).
// Use truncating instruction (OK because input is positive).
- __ cvttsd2si(output_reg, input_reg);
+ __ cvttsd2si(output_reg, xmm_scratch);
// Overflow is signalled with minint.
__ cmpl(output_reg, Immediate(0x80000000));
DeoptimizeIf(equal, instr->environment());
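
The fix above adds 0.5 into the scratch register rather than the input register, so a later deopt still sees the unmodified input. A hedged sketch of the non-negative fast path, using a range check in place of cvttsd2si's 0x80000000 overflow marker (the plain C++ cast would be undefined there):

#include <cstdint>

// Round a non-negative double to int32 by adding 0.5 and truncating,
// leaving `input` itself untouched. Returns false where the stub deopts.
bool RoundPositiveDouble(double input, int32_t* out) {
  double scratch = 0.5;
  scratch += input;                      // addsd(xmm_scratch, input_reg)
  if (!(scratch < 2147483648.0)) {
    return false;                        // cvttsd2si would yield 0x80000000
  }
  *out = static_cast<int32_t>(scratch);  // truncating, like cvttsd2si
  return true;
}
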
@@ -2849,65 +2842,68 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->InputAt(0));
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+
+ // Note that according to ECMA-262 15.8.2.13:
+ // Math.pow(-Infinity, 0.5) == Infinity
+ // Math.sqrt(-Infinity) == NaN
+ Label done, sqrt;
+ // Check base for -Infinity. According to IEEE-754, double-precision
+ // -Infinity has the highest 12 bits set and the lowest 52 bits cleared.
+ __ movq(kScratchRegister, V8_INT64_C(0xFFF0000000000000), RelocInfo::NONE);
+ __ movq(xmm_scratch, kScratchRegister);
+ __ ucomisd(xmm_scratch, input_reg);
+ // Comparing -Infinity with NaN results in "unordered", which sets the
+ // zero flag as if both were equal. However, it also sets the carry flag.
+ __ j(not_equal, &sqrt, Label::kNear);
+ __ j(carry, &sqrt, Label::kNear);
+ // If input is -Infinity, return Infinity.
+ __ xorps(input_reg, input_reg);
+ __ subsd(input_reg, xmm_scratch);
+ __ jmp(&done, Label::kNear);
+
+ // Square root.
+ __ bind(&sqrt);
__ xorps(xmm_scratch, xmm_scratch);
__ addsd(input_reg, xmm_scratch); // Convert -0 to +0.
__ sqrtsd(input_reg, input_reg);
+ __ bind(&done);
}
void LCodeGen::DoPower(LPower* instr) {
- LOperand* left = instr->InputAt(0);
- XMMRegister left_reg = ToDoubleRegister(left);
- ASSERT(!left_reg.is(xmm1));
- LOperand* right = instr->InputAt(1);
- XMMRegister result_reg = ToDoubleRegister(instr->result());
Representation exponent_type = instr->hydrogen()->right()->representation();
- if (exponent_type.IsDouble()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers
- __ movaps(xmm0, left_reg);
- ASSERT(ToDoubleRegister(right).is(xmm1));
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
- } else if (exponent_type.IsInteger32()) {
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers: xmm0 and edi (not rdi).
- // On Windows, the registers are xmm0 and edx.
- __ movaps(xmm0, left_reg);
+ // Having marked this as a call, we can use any registers.
+ // Just make sure that the input/output registers are the expected ones.
+
+ // Choose register conforming to calling convention (when bailing out).
#ifdef _WIN64
- ASSERT(ToRegister(right).is(rdx));
+ Register exponent = rdx;
#else
- ASSERT(ToRegister(right).is(rdi));
+ Register exponent = rdi;
#endif
- __ CallCFunction(
- ExternalReference::power_double_int_function(isolate()), 2);
- } else {
- ASSERT(exponent_type.IsTagged());
- Register right_reg = ToRegister(right);
-
- Label non_smi, call;
- __ JumpIfNotSmi(right_reg, &non_smi);
- __ SmiToInteger32(right_reg, right_reg);
- __ cvtlsi2sd(xmm1, right_reg);
- __ jmp(&call);
-
- __ bind(&non_smi);
- __ CmpObjectType(right_reg, HEAP_NUMBER_TYPE , kScratchRegister);
+ ASSERT(!instr->InputAt(1)->IsRegister() ||
+ ToRegister(instr->InputAt(1)).is(exponent));
+ ASSERT(!instr->InputAt(1)->IsDoubleRegister() ||
+ ToDoubleRegister(instr->InputAt(1)).is(xmm1));
+ ASSERT(ToDoubleRegister(instr->InputAt(0)).is(xmm2));
+ ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
+
+ if (exponent_type.IsTagged()) {
+ Label no_deopt;
+ __ JumpIfSmi(exponent, &no_deopt);
+ __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
DeoptimizeIf(not_equal, instr->environment());
- __ movsd(xmm1, FieldOperand(right_reg, HeapNumber::kValueOffset));
-
- __ bind(&call);
- __ PrepareCallCFunction(2);
- // Move arguments to correct registers xmm0 and xmm1.
- __ movaps(xmm0, left_reg);
- // Right argument is already in xmm1.
- __ CallCFunction(
- ExternalReference::power_double_double_function(isolate()), 2);
+ __ bind(&no_deopt);
+ MathPowStub stub(MathPowStub::TAGGED);
+ __ CallStub(&stub);
+ } else if (exponent_type.IsInteger32()) {
+ MathPowStub stub(MathPowStub::INTEGER);
+ __ CallStub(&stub);
+ } else {
+ ASSERT(exponent_type.IsDouble());
+ MathPowStub stub(MathPowStub::DOUBLE);
+ __ CallStub(&stub);
}
- // Return value is in xmm0.
- __ movaps(result_reg, xmm0);
- // Restore context register.
- __ movq(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
}
@@ -3042,7 +3038,7 @@ void LCodeGen::DoCallGlobal(LCallGlobal* instr) {
void LCodeGen::DoCallKnownGlobal(LCallKnownGlobal* instr) {
ASSERT(ToRegister(instr->result()).is(rax));
- __ Move(rdi, instr->target());
+ __ LoadHeapObject(rdi, instr->target());
CallKnownFunction(instr->target(), instr->arity(), instr, CALL_AS_FUNCTION);
}
@@ -3725,9 +3721,16 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
void LCodeGen::DoCheckFunction(LCheckFunction* instr) {
- ASSERT(instr->InputAt(0)->IsRegister());
- Register reg = ToRegister(instr->InputAt(0));
- __ Cmp(reg, instr->hydrogen()->target());
+ Register reg = ToRegister(instr->value());
+ Handle<JSFunction> target = instr->hydrogen()->target();
+ if (isolate()->heap()->InNewSpace(*target)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(target);
+ __ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ __ cmpq(reg, Operand(kScratchRegister, 0));
+ } else {
+ __ Cmp(reg, target);
+ }
DeoptimizeIf(not_equal, instr->environment());
}
@@ -3793,18 +3796,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
}
-void LCodeGen::LoadHeapObject(Register result, Handle<HeapObject> object) {
- if (heap()->InNewSpace(*object)) {
- Handle<JSGlobalPropertyCell> cell =
- factory()->NewJSGlobalPropertyCell(object);
- __ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
- __ movq(result, Operand(result, 0));
- } else {
- __ Move(result, object);
- }
-}
-
-
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Register reg = ToRegister(instr->TempAt(0));
@@ -3812,7 +3803,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
Handle<JSObject> current_prototype = instr->prototype();
// Load prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
// Check prototype maps up to the holder.
while (!current_prototype.is_identical_to(holder)) {
@@ -3822,7 +3813,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
current_prototype =
Handle<JSObject>(JSObject::cast(current_prototype->GetPrototype()));
// Load next prototype object.
- LoadHeapObject(reg, current_prototype);
+ __ LoadHeapObject(reg, current_prototype);
}
// Check the holder map.
@@ -3833,16 +3824,32 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
- Handle<FixedArray> constant_elements = instr->hydrogen()->constant_elements();
- ASSERT_EQ(2, constant_elements->length());
- ElementsKind constant_elements_kind =
- static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+ Heap* heap = isolate()->heap();
+ ElementsKind boilerplate_elements_kind =
+ instr->hydrogen()->boilerplate_elements_kind();
+
+ // Deopt if the array literal boilerplate ElementsKind is of a type different
+ // from the expected one. The check isn't necessary if the boilerplate has
+ // already been converted to FAST_ELEMENTS.
+ if (boilerplate_elements_kind != FAST_ELEMENTS) {
+ __ LoadHeapObject(rax, instr->hydrogen()->boilerplate_object());
+ __ movq(rbx, FieldOperand(rax, HeapObject::kMapOffset));
+ // Load the map's "bit field 2".
+ __ movb(rbx, FieldOperand(rbx, Map::kBitField2Offset));
+ // Retrieve elements_kind from bit field 2.
+ __ and_(rbx, Immediate(Map::kElementsKindMask));
+ __ cmpb(rbx, Immediate(boilerplate_elements_kind <<
+ Map::kElementsKindShift));
+ DeoptimizeIf(not_equal, instr->environment());
+ }
// Set up the parameters to the stub/runtime call.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(rax, JSFunction::kLiteralsOffset));
__ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- __ Push(instr->hydrogen()->constant_elements());
+ // Boilerplate already exists; constant elements are never accessed.
+ // Pass an empty fixed array.
+ __ Push(Handle<FixedArray>(heap->empty_fixed_array()));
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -3858,9 +3865,9 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
- constant_elements_kind == FAST_DOUBLE_ELEMENTS
- ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- : FastCloneShallowArrayStub::CLONE_ELEMENTS;
+ boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
+ ? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
+ : FastCloneShallowArrayStub::CLONE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, length);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
}
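
The new deopt guard in DoArrayLiteral compares the packed elements kind from the boilerplate map's bit field 2 without shifting the field down; the expected kind is shifted up instead. A sketch with mask and shift as stand-ins for Map::kElementsKindMask and Map::kElementsKindShift:

#include <cstdint>

// True when the packed elements-kind bits in bit_field2 match the expected
// kind. Mirrors the and_/cmpb pair above: the field stays in shifted
// position and the expected kind is shifted up to meet it.
bool ElementsKindMatches(uint8_t bit_field2, int expected_kind,
                         uint8_t mask, int shift) {
  return (bit_field2 & mask) == static_cast<uint8_t>(expected_kind << shift);
}
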
@@ -3899,10 +3906,10 @@ void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ lea(rcx, Operand(result, *offset));
__ movq(FieldOperand(result, total_offset), rcx);
- LoadHeapObject(source, value_object);
+ __ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset);
} else if (value->IsHeapObject()) {
- LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
+ __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
__ movq(FieldOperand(result, total_offset), rcx);
} else {
__ movq(rcx, value, RelocInfo::NONE);
@@ -3927,7 +3934,7 @@ void LCodeGen::DoObjectLiteralFast(LObjectLiteralFast* instr) {
__ bind(&allocated);
int offset = 0;
- LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
+ __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset);
ASSERT_EQ(size, offset);
}
@@ -4053,7 +4060,12 @@ void LCodeGen::DoTypeof(LTypeof* instr) {
void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
ASSERT(!operand->IsDoubleRegister());
if (operand->IsConstantOperand()) {
- __ Push(ToHandle(LConstantOperand::cast(operand)));
+ Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
+ if (object->IsSmi()) {
+ __ Push(Handle<Smi>::cast(object));
+ } else {
+ __ PushHeapObject(Handle<HeapObject>::cast(object));
+ }
} else if (operand->IsRegister()) {
__ push(ToRegister(operand));
} else {
@@ -4176,25 +4188,20 @@ void LCodeGen::EmitIsConstructCall(Register temp) {
}
-void LCodeGen::EnsureSpaceForLazyDeopt() {
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
// Ensure that we have enough space after the previous lazy-bailout
// instruction for patching the code here.
int current_pc = masm()->pc_offset();
- int patch_size = Deoptimizer::patch_size();
- if (current_pc < last_lazy_deopt_pc_ + patch_size) {
- int padding_size = last_lazy_deopt_pc_ + patch_size - current_pc;
- while (padding_size > 0) {
- int nop_size = padding_size > 9 ? 9 : padding_size;
- __ nop(nop_size);
- padding_size -= nop_size;
- }
+ if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+ int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+ __ Nop(padding_size);
}
- last_lazy_deopt_pc_ = masm()->pc_offset();
}
void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
ASSERT(instr->HasEnvironment());
LEnvironment* env = instr->environment();
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
@@ -4272,7 +4279,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ j(above_equal, &done, Label::kNear);
StackCheckStub stub;
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(&done);
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -4283,7 +4291,8 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
new DeferredStackCheck(this, instr);
__ CompareRoot(rsp, Heap::kStackLimitRootIndex);
__ j(below, deferred_stack_check->entry());
- EnsureSpaceForLazyDeopt();
+ EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+ last_lazy_deopt_pc_ = masm()->pc_offset();
__ bind(instr->done_label());
deferred_stack_check->SetExit(instr->done_label());
RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
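
Taken together, the EnsureSpaceForLazyDeopt changes enforce one invariant: the code emitted since the previous lazy-deopt point spans at least patch_size() bytes, with call sites pre-subtracting the size of the call they are about to emit. The padding amount reduces to the calculation below; its result is what gets fed to the new Nop():

#include <algorithm>

// Bytes of NOP padding required so that the code emitted since the last
// lazy-deopt point spans at least `space_needed` bytes.
int LazyDeoptPadding(int current_pc, int last_lazy_deopt_pc,
                     int space_needed) {
  return std::max(0, last_lazy_deopt_pc + space_needed - current_pc);
}
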
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 868f75e90..832942f1e 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -198,7 +198,6 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
CallKind call_kind);
- void LoadHeapObject(Register result, Handle<HeapObject> object);
void RecordSafepointWithLazyDeopt(LInstruction* instr,
SafepointMode safepoint_mode,
@@ -305,7 +304,7 @@ class LCodeGen BASE_EMBEDDED {
Address address;
};
- void EnsureSpaceForLazyDeopt();
+ void EnsureSpaceForLazyDeopt(int space_needed);
LChunk* const chunk_;
MacroAssembler* const masm_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index b486faec6..7fa8a3999 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -1397,7 +1397,7 @@ LInstruction* LChunkBuilder::DoPower(HPower* instr) {
UseFixed(instr->right(), rdi);
#endif
LPower* result = new LPower(left, right);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr,
+ return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
CAN_DEOPTIMIZE_EAGERLY);
}
@@ -1786,7 +1786,8 @@ LInstruction* LChunkBuilder::DoStoreGlobalGeneric(HStoreGlobalGeneric* instr) {
LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
LOperand* context = UseRegisterAtStart(instr->value());
- return DefineAsRegister(new LLoadContextSlot(context));
+ LInstruction* result = DefineAsRegister(new LLoadContextSlot(context));
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
@@ -1803,7 +1804,8 @@ LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
value = UseRegister(instr->value());
temp = NULL;
}
- return new LStoreContextSlot(context, value, temp);
+ LInstruction* result = new LStoreContextSlot(context, value, temp);
+ return instr->RequiresHoleCheck() ? AssignEnvironment(result) : result;
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index c21223b3a..280d2358c 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -1781,6 +1781,8 @@ class LCheckFunction: public LTemplateInstruction<0, 1, 0> {
inputs_[0] = value;
}
+ LOperand* value() { return InputAt(0); }
+
DECLARE_CONCRETE_INSTRUCTION(CheckFunction, "check-function")
DECLARE_HYDROGEN_ACCESSOR(CheckFunction)
};
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index caca628f1..10e423b5b 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -2238,6 +2238,43 @@ void MacroAssembler::Push(Handle<Object> source) {
}
+void MacroAssembler::LoadHeapObject(Register result,
+ Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(result, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(result, Operand(result, 0));
+ } else {
+ Move(result, object);
+ }
+}
+
+
+void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
+ if (isolate()->heap()->InNewSpace(*object)) {
+ Handle<JSGlobalPropertyCell> cell =
+ isolate()->factory()->NewJSGlobalPropertyCell(object);
+ movq(kScratchRegister, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(kScratchRegister, Operand(kScratchRegister, 0));
+ push(kScratchRegister);
+ } else {
+ Push(object);
+ }
+}
+
+
+void MacroAssembler::LoadGlobalCell(Register dst,
+ Handle<JSGlobalPropertyCell> cell) {
+ if (dst.is(rax)) {
+ load_rax(cell.location(), RelocInfo::GLOBAL_PROPERTY_CELL);
+ } else {
+ movq(dst, cell, RelocInfo::GLOBAL_PROPERTY_CELL);
+ movq(dst, Operand(dst, 0));
+ }
+}
+
+
void MacroAssembler::Push(Smi* source) {
intptr_t smi = reinterpret_cast<intptr_t>(source);
if (is_int32(smi)) {
@@ -3049,7 +3086,7 @@ void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
ASSERT(flag == JUMP_FUNCTION || has_frame());
// Get the function and set up the context.
- Move(rdi, function);
+ LoadHeapObject(rdi, function);
movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// We call indirectly through the code field in the function to
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index cf03e59e5..8046e5cd3 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -784,6 +784,14 @@ class MacroAssembler: public Assembler {
void Cmp(const Operand& dst, Smi* src);
void Push(Handle<Object> source);
+ // Load a heap object and handle the case of new-space objects by
+ // indirecting via a global cell.
+ void LoadHeapObject(Register result, Handle<HeapObject> object);
+ void PushHeapObject(Handle<HeapObject> object);
+
+ // Load a global cell into a register.
+ void LoadGlobalCell(Register dst, Handle<JSGlobalPropertyCell> cell);
+
// Emit code to discard a non-negative number of pointer-sized elements
// from the stack, clobbering only the rsp register.
void Drop(int stack_elements);
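
LoadHeapObject and PushHeapObject exist because new-space objects move during scavenges, so their addresses cannot be baked into code as immediates; the code instead embeds the address of a property cell that the GC keeps current. A conceptual sketch, where Cell and the allocator callback are hypothetical stand-ins rather than V8 types:

// Hypothetical stand-in for JSGlobalPropertyCell: a non-moving slot whose
// contents the GC rewrites when the referenced object moves.
struct Cell { void* value; };

void* LoadHeapObjectSketch(void* object, bool in_new_space,
                           Cell* (*allocate_cell)(void*)) {
  if (in_new_space) {
    Cell* cell = allocate_cell(object);  // the cell itself never moves
    return cell->value;                  // one extra indirection at run time
  }
  return object;                         // old space: embed directly
}
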
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 5a81c8974..a28dbbf02 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -421,7 +421,7 @@ static void GenerateFastApiCall(MacroAssembler* masm,
// -----------------------------------
// Get the function and set up the context.
Handle<JSFunction> function = optimization.constant_function();
- __ Move(rdi, function);
+ __ LoadHeapObject(rdi, function);
__ movq(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
// Pass the additional arguments.
@@ -1015,7 +1015,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1026,7 +1026,7 @@ void StubCompiler::GenerateLoadConstant(Handle<JSObject> object,
object, receiver, holder, scratch1, scratch2, scratch3, name, miss);
// Return the constant value.
- __ Move(rax, value);
+ __ LoadHeapObject(rax, value);
__ ret(0);
}
@@ -2370,23 +2370,9 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
// Store the value in the cell.
__ movq(cell_operand, rax);
- Label done;
- __ JumpIfSmi(rax, &done);
-
- __ movq(rcx, rax);
- __ lea(rdx, cell_operand);
- // Cells are always in the remembered set.
- __ RecordWrite(rbx, // Object.
- rdx, // Address.
- rcx, // Value.
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
-
+ // Cells are always rescanned, so no write barrier here.
// Return the value (register rax).
- __ bind(&done);
-
Counters* counters = isolate()->counters();
__ IncrementCounter(counters->named_store_global_inline(), 1);
__ ret(0);
@@ -2578,7 +2564,7 @@ Handle<Code> LoadStubCompiler::CompileLoadCallback(
Handle<Code> LoadStubCompiler::CompileLoadConstant(Handle<JSObject> object,
Handle<JSObject> holder,
- Handle<Object> value,
+ Handle<JSFunction> value,
Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -2732,7 +2718,7 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadConstant(
Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- Handle<Object> value) {
+ Handle<JSFunction> value) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
diff --git a/deps/v8/test/cctest/SConscript b/deps/v8/test/cctest/SConscript
index 621d8ecf6..5fc9188ee 100644
--- a/deps/v8/test/cctest/SConscript
+++ b/deps/v8/test/cctest/SConscript
@@ -110,7 +110,8 @@ SOURCES = {
],
'arch:x64': ['test-assembler-x64.cc',
'test-macro-assembler-x64.cc',
- 'test-log-stack-tracer.cc'],
+ 'test-log-stack-tracer.cc',
+ 'test-disasm-x64.cc'],
'arch:mips': ['test-assembler-mips.cc',
'test-disasm-mips.cc'],
'os:linux': ['test-platform-linux.cc'],
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index 7161345ec..2de0afba1 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -85,9 +85,11 @@ test-debug/DebugBreakLoop: SKIP
##############################################################################
[ $arch == mips ]
-test-deoptimization: SKIP
test-serialize: SKIP
-# Tests that may time out.
-test-api/ExternalArrays: PASS || TIMEOUT
-test-api/Threading: PASS || TIMEOUT
+##############################################################################
+[ $arch == mips && $crankshaft ]
+
+# Tests that time out with crankshaft.
+test-debug/ThreadedDebugging: SKIP
+test-debug/DebugBreakLoop: SKIP
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 87e322c96..8b618d490 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -13438,6 +13438,8 @@ TEST(SourceURLInStackTrace) {
// Test that idle notification can be handled and eventually returns true.
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
THREADED_TEST(IdleNotification) {
v8::HandleScope scope;
LocalContext env;
@@ -13454,19 +13456,17 @@ THREADED_TEST(IdleNotification) {
"};"
"binom(1000, 500)");
bool rv = false;
- intptr_t old_size = HEAP->SizeOfObjects();
- bool no_idle_work = v8::V8::IdleNotification();
for (int i = 0; i < 100; i++) {
rv = v8::V8::IdleNotification();
if (rv)
break;
}
CHECK(rv == true);
- intptr_t new_size = HEAP->SizeOfObjects();
- CHECK(no_idle_work || new_size < 3 * old_size / 4);
}
// Test that idle notification can be handled and eventually returns true.
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
THREADED_TEST(IdleNotificationWithHint) {
v8::HandleScope scope;
LocalContext env;
@@ -13492,7 +13492,7 @@ THREADED_TEST(IdleNotificationWithHint) {
}
CHECK(rv == true);
intptr_t new_size = HEAP->SizeOfObjects();
- CHECK(no_idle_work || new_size < 3 * old_size / 4);
+ CHECK(no_idle_work || new_size < old_size);
}
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index cdab8f7cb..815e61849 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -408,4 +408,72 @@ TEST(AssemblerIa3210) {
__ nop();
}
+
+TEST(AssemblerMultiByteNop) {
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::internal::byte buffer[1024];
+ Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+ __ push(ebx);
+ __ push(ecx);
+ __ push(edx);
+ __ push(edi);
+ __ push(esi);
+ __ mov(eax, 1);
+ __ mov(ebx, 2);
+ __ mov(ecx, 3);
+ __ mov(edx, 4);
+ __ mov(edi, 5);
+ __ mov(esi, 6);
+ for (int i = 0; i < 16; i++) {
+ int before = assm.pc_offset();
+ __ Nop(i);
+ CHECK_EQ(assm.pc_offset() - before, i);
+ }
+
+ Label fail;
+ __ cmp(eax, 1);
+ __ j(not_equal, &fail);
+ __ cmp(ebx, 2);
+ __ j(not_equal, &fail);
+ __ cmp(ecx, 3);
+ __ j(not_equal, &fail);
+ __ cmp(edx, 4);
+ __ j(not_equal, &fail);
+ __ cmp(edi, 5);
+ __ j(not_equal, &fail);
+ __ cmp(esi, 6);
+ __ j(not_equal, &fail);
+ __ mov(eax, 42);
+ __ pop(esi);
+ __ pop(edi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+ __ ret(0);
+ __ bind(&fail);
+ __ mov(eax, 13);
+ __ pop(esi);
+ __ pop(edi);
+ __ pop(edx);
+ __ pop(ecx);
+ __ pop(ebx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked());
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ CHECK_EQ(42, res);
+}
+
+
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index 28f7c9b70..959cf3fe5 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -36,6 +36,7 @@
#include "cctest.h"
using v8::internal::Assembler;
+using v8::internal::Code;
using v8::internal::CodeDesc;
using v8::internal::FUNCTION_CAST;
using v8::internal::Immediate;
@@ -53,6 +54,7 @@ using v8::internal::r15;
using v8::internal::r8;
using v8::internal::r9;
using v8::internal::rax;
+using v8::internal::rbx;
using v8::internal::rbp;
using v8::internal::rcx;
using v8::internal::rdi;
@@ -86,6 +88,16 @@ static const v8::internal::Register arg2 = rsi;
#define __ assm.
+static v8::Persistent<v8::Context> env;
+
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+
+
TEST(AssemblerX64ReturnOperation) {
OS::Setup();
// Allocate an executable page of memory.
@@ -359,4 +371,73 @@ TEST(AssemblerX64LabelChaining) {
__ nop();
}
+
+TEST(AssemblerMultiByteNop) {
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::internal::byte buffer[1024];
+ Assembler assm(Isolate::Current(), buffer, sizeof(buffer));
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rdx);
+ __ push(rdi);
+ __ push(rsi);
+ __ movq(rax, Immediate(1));
+ __ movq(rbx, Immediate(2));
+ __ movq(rcx, Immediate(3));
+ __ movq(rdx, Immediate(4));
+ __ movq(rdi, Immediate(5));
+ __ movq(rsi, Immediate(6));
+ for (int i = 0; i < 16; i++) {
+ int before = assm.pc_offset();
+ __ Nop(i);
+ CHECK_EQ(assm.pc_offset() - before, i);
+ }
+
+ Label fail;
+ __ cmpq(rax, Immediate(1));
+ __ j(not_equal, &fail);
+ __ cmpq(rbx, Immediate(2));
+ __ j(not_equal, &fail);
+ __ cmpq(rcx, Immediate(3));
+ __ j(not_equal, &fail);
+ __ cmpq(rdx, Immediate(4));
+ __ j(not_equal, &fail);
+ __ cmpq(rdi, Immediate(5));
+ __ j(not_equal, &fail);
+ __ cmpq(rsi, Immediate(6));
+ __ j(not_equal, &fail);
+ __ movq(rax, Immediate(42));
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rdx);
+ __ pop(rcx);
+ __ pop(rbx);
+ __ ret(0);
+ __ bind(&fail);
+ __ movq(rax, Immediate(13));
+ __ pop(rsi);
+ __ pop(rdi);
+ __ pop(rdx);
+ __ pop(rcx);
+ __ pop(rbx);
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Code* code = Code::cast(HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ v8::internal::Handle<v8::internal::Object>(
+ HEAP->undefined_value()))->ToObjectChecked());
+ CHECK(code->IsCode());
+
+ F0 f = FUNCTION_CAST<F0>(code->entry());
+ int res = f();
+ CHECK_EQ(42, res);
+}
+
+
+
+
#undef __
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index a9e28362f..8543a3772 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -6869,7 +6869,7 @@ TEST(DebugBreakFunctionApply) {
foo->Call(env->Global(), 0, NULL);
// When keeping the debug break several break will happen.
- CHECK_EQ(3, break_point_hit_count);
+ CHECK_GT(break_point_hit_count, 1);
v8::Debug::SetDebugEventListener(NULL);
CheckDebuggerUnloaded();
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 1e38e4ea7..da0950543 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -449,6 +449,11 @@ TEST(DisasmIa320) {
}
}
+ // Nop instructions
+ for (int i = 0; i < 16; i++) {
+ __ Nop(i);
+ }
+
__ ret(0);
CodeDesc desc;
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
new file mode 100644
index 000000000..da85eb933
--- /dev/null
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -0,0 +1,429 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "v8.h"
+
+#include "debug.h"
+#include "disasm.h"
+#include "disassembler.h"
+#include "macro-assembler.h"
+#include "serialize.h"
+#include "cctest.h"
+
+using namespace v8::internal;
+
+static v8::Persistent<v8::Context> env;
+
+static void InitializeVM() {
+ if (env.IsEmpty()) {
+ env = v8::Context::New();
+ }
+}
+
+
+#define __ assm.
+
+
+static void DummyStaticFunction(Object* result) {
+}
+
+
+TEST(DisasmX64) {
+ InitializeVM();
+ v8::HandleScope scope;
+ v8::internal::byte buffer[2048];
+ Assembler assm(Isolate::Current(), buffer, sizeof buffer);
+ DummyStaticFunction(NULL);  // Referenced only to silence an unused-function warning.
+
+ // Short immediate instructions
+ __ addq(rax, Immediate(12345678));
+ __ or_(rax, Immediate(12345678));
+ __ subq(rax, Immediate(12345678));
+ __ xor_(rax, Immediate(12345678));
+ __ and_(rax, Immediate(12345678));
+
+ // ---- This one caused crash
+ __ movq(rbx, Operand(rsp, rcx, times_2, 0)); // [rsp+rcx*4]
+
+ // ---- All instructions that I can think of
+ __ addq(rdx, rbx);
+ __ addq(rdx, Operand(rbx, 0));
+ __ addq(rdx, Operand(rbx, 16));
+ __ addq(rdx, Operand(rbx, 1999));
+ __ addq(rdx, Operand(rsp, 0));
+ __ addq(rdx, Operand(rsp, 16));
+ __ addq(rdx, Operand(rsp, 1999));
+ __ nop();
+ __ addq(rdi, Operand(rbp, rcx, times_4, 0));
+ __ addq(rdi, Operand(rbp, rcx, times_4, 12));
+ __ addq(Operand(rbp, rcx, times_4, 12), Immediate(12));
+
+ __ nop();
+ __ addq(rbx, Immediate(12));
+ __ nop();
+ __ nop();
+ __ and_(rdx, Immediate(3));
+ __ and_(rdx, Operand(rsp, 4));
+ __ cmpq(rdx, Immediate(3));
+ __ cmpq(rdx, Operand(rsp, 4));
+ __ cmpq(Operand(rbp, rcx, times_4, 0), Immediate(1000));
+ __ cmpb(rbx, Operand(rbp, rcx, times_2, 0));
+ __ cmpb(Operand(rbp, rcx, times_2, 0), rbx);
+ __ or_(rdx, Immediate(3));
+ __ xor_(rdx, Immediate(3));
+ __ nop();
+ {
+ CHECK(CpuFeatures::IsSupported(CPUID));
+ CpuFeatures::Scope fscope(CPUID);
+ __ cpuid();
+ }
+ {
+ CHECK(CpuFeatures::IsSupported(RDTSC));
+ CpuFeatures::Scope fscope(RDTSC);
+ __ rdtsc();
+ }
+ __ movsxbq(rdx, Operand(rcx, 0));
+ __ movsxwq(rdx, Operand(rcx, 0));
+ __ movzxbl(rdx, Operand(rcx, 0));
+ __ movzxwl(rdx, Operand(rcx, 0));
+ __ movzxbq(rdx, Operand(rcx, 0));
+ __ movzxwq(rdx, Operand(rcx, 0));
+
+ __ nop();
+ __ imul(rdx, rcx);
+ __ shld(rdx, rcx);
+ __ shrd(rdx, rcx);
+ __ bts(Operand(rdx, 0), rcx);
+ __ bts(Operand(rbx, rcx, times_4, 0), rcx);
+ __ nop();
+ __ push(Immediate(12));
+ __ push(Immediate(23456));
+ __ push(rcx);
+ __ push(rsi);
+ __ push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(Operand(rbx, rcx, times_4, 0));
+ __ push(Operand(rbx, rcx, times_4, 0));
+ __ push(Operand(rbx, rcx, times_4, 10000));
+ __ pop(rdx);
+ __ pop(rax);
+ __ pop(Operand(rbx, rcx, times_4, 0));
+ __ nop();
+
+ __ addq(rdx, Operand(rsp, 16));
+ __ addq(rdx, rcx);
+ __ movb(rdx, Operand(rcx, 0));
+ __ movb(rcx, Immediate(6));
+ __ movb(Operand(rsp, 16), rdx);
+ __ movw(Operand(rsp, 16), rdx);
+ __ nop();
+ __ movsxwq(rdx, Operand(rsp, 12));
+ __ movsxbq(rdx, Operand(rsp, 12));
+ __ movsxlq(rdx, Operand(rsp, 12));
+ __ movzxwq(rdx, Operand(rsp, 12));
+ __ movzxbq(rdx, Operand(rsp, 12));
+ __ nop();
+ __ movq(rdx, Immediate(1234567));
+ __ movq(rdx, Operand(rsp, 12));
+ __ movq(Operand(rbx, rcx, times_4, 10000), Immediate(12345));
+ __ movq(Operand(rbx, rcx, times_4, 10000), rdx);
+ __ nop();
+ __ decb(rdx);
+ __ decb(Operand(rax, 10));
+ __ decb(Operand(rbx, rcx, times_4, 10000));
+ __ decq(rdx);
+ __ cdq();
+
+ __ nop();
+ __ idivq(rdx);
+ __ mul(rdx);
+ __ neg(rdx);
+ __ not_(rdx);
+ __ testq(Operand(rbx, rcx, times_4, 10000), rdx);
+
+ __ imul(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ imul(rdx, rcx, Immediate(12));
+ __ imul(rdx, rcx, Immediate(1000));
+
+ __ incq(rdx);
+ __ incq(Operand(rbx, rcx, times_4, 10000));
+ __ push(Operand(rbx, rcx, times_4, 10000));
+ __ pop(Operand(rbx, rcx, times_4, 10000));
+ __ jmp(Operand(rbx, rcx, times_4, 10000));
+
+ __ lea(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ or_(rdx, Immediate(12345));
+ __ or_(rdx, Operand(rbx, rcx, times_4, 10000));
+
+ __ nop();
+
+ __ rcl(rdx, Immediate(1));
+ __ rcl(rdx, Immediate(7));
+ __ rcr(rdx, Immediate(1));
+ __ rcr(rdx, Immediate(7));
+ __ sar(rdx, Immediate(1));
+ __ sar(rdx, Immediate(6));
+ __ sar_cl(rdx);
+ __ sbbq(rdx, rbx);
+ __ shld(rdx, rbx);
+ __ shl(rdx, Immediate(1));
+ __ shl(rdx, Immediate(6));
+ __ shl_cl(rdx);
+ __ shrd(rdx, rbx);
+ __ shr(rdx, Immediate(1));
+ __ shr(rdx, Immediate(7));
+ __ shr_cl(rdx);
+
+
+ // Immediates
+
+ __ addq(rbx, Immediate(12));
+ __ addq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+
+ __ and_(rbx, Immediate(12345));
+
+ __ cmpq(rbx, Immediate(12345));
+ __ cmpq(rbx, Immediate(12));
+ __ cmpq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+ __ cmpb(rax, Immediate(100));
+
+ __ or_(rbx, Immediate(12345));
+
+ __ subq(rbx, Immediate(12));
+ __ subq(Operand(rdx, rcx, times_4, 10000), Immediate(12));
+
+ __ xor_(rbx, Immediate(12345));
+
+ __ imul(rdx, rcx, Immediate(12));
+ __ imul(rdx, rcx, Immediate(1000));
+
+ __ cld();
+
+ __ subq(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ subq(rdx, rbx);
+
+ __ testq(rdx, Immediate(12345));
+ __ testq(Operand(rbx, rcx, times_8, 10000), rdx);
+ __ testb(Operand(rcx, rbx, times_2, 1000), rdx);
+ __ testb(Operand(rax, -20), Immediate(0x9A));
+ __ nop();
+
+ __ xor_(rdx, Immediate(12345));
+ __ xor_(rdx, Operand(rbx, rcx, times_8, 10000));
+ __ bts(Operand(rbx, rcx, times_8, 10000), rdx);
+ __ hlt();
+ __ int3();
+ __ ret(0);
+ __ ret(8);
+
+ // Calls
+
+ Label L1, L2;
+ __ bind(&L1);
+ __ nop();
+ __ call(&L1);
+ __ call(&L2);
+ __ nop();
+ __ bind(&L2);
+ __ call(Operand(rbx, rcx, times_4, 10000));
+ __ nop();
+ Handle<Code> ic(Isolate::Current()->builtins()->builtin(
+ Builtins::kLoadIC_Initialize));
+ __ call(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+ __ nop();
+
+ __ jmp(&L1);
+ __ jmp(Operand(rbx, rcx, times_4, 10000));
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ ExternalReference after_break_target =
+ ExternalReference(Debug_Address::AfterBreakTarget(),
+ assm.isolate());
+#endif // ENABLE_DEBUGGER_SUPPORT
+ __ jmp(ic, RelocInfo::CODE_TARGET);
+ __ nop();
+
+
+ Label Ljcc;
+ __ nop();
+ // long jumps
+ __ j(overflow, &Ljcc);
+ __ j(no_overflow, &Ljcc);
+ __ j(below, &Ljcc);
+ __ j(above_equal, &Ljcc);
+ __ j(equal, &Ljcc);
+ __ j(not_equal, &Ljcc);
+ __ j(below_equal, &Ljcc);
+ __ j(above, &Ljcc);
+ __ j(sign, &Ljcc);
+ __ j(not_sign, &Ljcc);
+ __ j(parity_even, &Ljcc);
+ __ j(parity_odd, &Ljcc);
+ __ j(less, &Ljcc);
+ __ j(greater_equal, &Ljcc);
+ __ j(less_equal, &Ljcc);
+ __ j(greater, &Ljcc);
+ __ nop();
+ __ bind(&Ljcc);
+ // short jumps
+ __ j(overflow, &Ljcc);
+ __ j(no_overflow, &Ljcc);
+ __ j(below, &Ljcc);
+ __ j(above_equal, &Ljcc);
+ __ j(equal, &Ljcc);
+ __ j(not_equal, &Ljcc);
+ __ j(below_equal, &Ljcc);
+ __ j(above, &Ljcc);
+ __ j(sign, &Ljcc);
+ __ j(not_sign, &Ljcc);
+ __ j(parity_even, &Ljcc);
+ __ j(parity_odd, &Ljcc);
+ __ j(less, &Ljcc);
+ __ j(greater_equal, &Ljcc);
+ __ j(less_equal, &Ljcc);
+ __ j(greater, &Ljcc);
+
+ // 0xD9 instructions
+ __ nop();
+
+ __ fld(1);
+ __ fld1();
+ __ fldz();
+ __ fldpi();
+ __ fabs();
+ __ fchs();
+ __ fprem();
+ __ fprem1();
+ __ fincstp();
+ __ ftst();
+ __ fxch(3);
+ __ fld_s(Operand(rbx, rcx, times_4, 10000));
+ __ fstp_s(Operand(rbx, rcx, times_4, 10000));
+ __ ffree(3);
+ __ fld_d(Operand(rbx, rcx, times_4, 10000));
+ __ fstp_d(Operand(rbx, rcx, times_4, 10000));
+ __ nop();
+
+ __ fild_s(Operand(rbx, rcx, times_4, 10000));
+ __ fistp_s(Operand(rbx, rcx, times_4, 10000));
+ __ fild_d(Operand(rbx, rcx, times_4, 10000));
+ __ fistp_d(Operand(rbx, rcx, times_4, 10000));
+ __ fnstsw_ax();
+ __ nop();
+ __ fadd(3);
+ __ fsub(3);
+ __ fmul(3);
+ __ fdiv(3);
+
+ __ faddp(3);
+ __ fsubp(3);
+ __ fmulp(3);
+ __ fdivp(3);
+ __ fcompp();
+ __ fwait();
+ __ nop();
+ {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ cvttss2si(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ cvttss2si(rdx, xmm1);
+ __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
+ __ cvttsd2si(rdx, xmm1);
+ __ cvttsd2siq(rdx, xmm1);
+ __ addsd(xmm1, xmm0);
+ __ mulsd(xmm1, xmm0);
+ __ subsd(xmm1, xmm0);
+ __ divsd(xmm1, xmm0);
+ __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
+ __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
+ __ ucomisd(xmm0, xmm1);
+
+ // 128 bit move instructions.
+ __ movdqa(xmm0, Operand(rbx, rcx, times_4, 10000));
+ __ movdqa(Operand(rbx, rcx, times_4, 10000), xmm0);
+ }
+ }
+
+ // cmov.
+ {
+ if (CpuFeatures::IsSupported(CMOV)) {
+ CpuFeatures::Scope use_cmov(CMOV);
+ __ cmovq(overflow, rax, Operand(rax, 0));
+ __ cmovq(no_overflow, rax, Operand(rax, 1));
+ __ cmovq(below, rax, Operand(rax, 2));
+ __ cmovq(above_equal, rax, Operand(rax, 3));
+ __ cmovq(equal, rax, Operand(rbx, 0));
+ __ cmovq(not_equal, rax, Operand(rbx, 1));
+ __ cmovq(below_equal, rax, Operand(rbx, 2));
+ __ cmovq(above, rax, Operand(rbx, 3));
+ __ cmovq(sign, rax, Operand(rcx, 0));
+ __ cmovq(not_sign, rax, Operand(rcx, 1));
+ __ cmovq(parity_even, rax, Operand(rcx, 2));
+ __ cmovq(parity_odd, rax, Operand(rcx, 3));
+ __ cmovq(less, rax, Operand(rdx, 0));
+ __ cmovq(greater_equal, rax, Operand(rdx, 1));
+ __ cmovq(less_equal, rax, Operand(rdx, 2));
+ __ cmovq(greater, rax, Operand(rdx, 3));
+ }
+ }
+
+ // andpd, etc.
+ {
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope fscope(SSE2);
+ __ andpd(xmm0, xmm1);
+ __ andpd(xmm1, xmm2);
+
+ __ movaps(xmm0, xmm1);
+ __ movaps(xmm1, xmm2);
+ }
+ }
+
+ // Nop instructions
+ for (int i = 0; i < 16; i++) {
+ __ Nop(i);
+ }
+
+ __ ret(0);
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Object>(HEAP->undefined_value()))->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef OBJECT_PRINT
+ Code::cast(code)->Print();
+ byte* begin = Code::cast(code)->instruction_start();
+ byte* end = begin + Code::cast(code)->instruction_size();
+ disasm::Disassembler::Disassemble(stdout, begin, end);
+#endif
+}
+
+#undef __
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 81b68a713..4b211b4b5 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -585,6 +585,22 @@ TEST(HeapSnapshotJSONSerializationAborting) {
}
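+// Recursively walks the snapshot graph down to |max_level| and checks that
+// every visited node can be found again via GetNodeById() using its own id.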
+static void CheckChildrenIds(const v8::HeapSnapshot* snapshot,
+ const v8::HeapGraphNode* node,
+ int level, int max_level) {
+ if (level > max_level) return;
+ CHECK_EQ(node, snapshot->GetNodeById(node->GetId()));
+ for (int i = 0, count = node->GetChildrenCount(); i < count; ++i) {
+ const v8::HeapGraphEdge* prop = node->GetChild(i);
+ const v8::HeapGraphNode* child =
+ snapshot->GetNodeById(prop->GetToNode()->GetId());
+ CHECK_EQ_UINT64_T(prop->GetToNode()->GetId(), child->GetId());
+ CHECK_EQ(prop->GetToNode(), child);
+ CheckChildrenIds(snapshot, child, level + 1, max_level);
+ }
+}
+
+
TEST(HeapSnapshotGetNodeById) {
v8::HandleScope scope;
LocalContext env;
@@ -592,12 +608,7 @@ TEST(HeapSnapshotGetNodeById) {
const v8::HeapSnapshot* snapshot =
v8::HeapProfiler::TakeSnapshot(v8_str("id"));
const v8::HeapGraphNode* root = snapshot->GetRoot();
- CHECK_EQ(root, snapshot->GetNodeById(root->GetId()));
- for (int i = 0, count = root->GetChildrenCount(); i < count; ++i) {
- const v8::HeapGraphEdge* prop = root->GetChild(i);
- CHECK_EQ(
- prop->GetToNode(), snapshot->GetNodeById(prop->GetToNode()->GetId()));
- }
+ CheckChildrenIds(snapshot, root, 0, 3);
// Check a big id, which should not exist yet.
CHECK_EQ(NULL, snapshot->GetNodeById(0x1000000UL));
}
@@ -1065,3 +1076,82 @@ TEST(FastCaseGetter) {
GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set-propWithSetter");
CHECK_NE(NULL, setterFunction);
}
+
+
+bool HasWeakEdge(const v8::HeapGraphNode* node) {
+ for (int i = 0; i < node->GetChildrenCount(); ++i) {
+ const v8::HeapGraphEdge* handle_edge = node->GetChild(i);
+ if (handle_edge->GetType() == v8::HeapGraphEdge::kWeak) return true;
+ }
+ return false;
+}
+
+
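+// Takes a fresh snapshot and reports whether the "(Global handles)" node
+// under "(GC roots)" has at least one weak outgoing edge.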
+bool HasWeakGlobalHandle() {
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("weaks"));
+ const v8::HeapGraphNode* gc_roots = GetNode(
+ snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(GC roots)");
+ CHECK_NE(NULL, gc_roots);
+ const v8::HeapGraphNode* global_handles = GetNode(
+ gc_roots, v8::HeapGraphNode::kObject, "(Global handles)");
+ CHECK_NE(NULL, global_handles);
+ return HasWeakEdge(global_handles);
+}
+
+
+static void PersistentHandleCallback(v8::Persistent<v8::Value> handle, void*) {
+ handle.Dispose();
+}
+
+
+TEST(WeakGlobalHandle) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CHECK(!HasWeakGlobalHandle());
+
+ v8::Persistent<v8::Object> handle =
+ v8::Persistent<v8::Object>::New(v8::Object::New());
+ handle.MakeWeak(NULL, PersistentHandleCallback);
+
+ CHECK(HasWeakGlobalHandle());
+}
+
+
+TEST(WeakGlobalContextRefs) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("weaks"));
+ const v8::HeapGraphNode* gc_roots = GetNode(
+ snapshot->GetRoot(), v8::HeapGraphNode::kObject, "(GC roots)");
+ CHECK_NE(NULL, gc_roots);
+ const v8::HeapGraphNode* global_handles = GetNode(
+ gc_roots, v8::HeapGraphNode::kObject, "(Global handles)");
+ CHECK_NE(NULL, global_handles);
+ const v8::HeapGraphNode* global_context = GetNode(
+ global_handles, v8::HeapGraphNode::kHidden, "system / GlobalContext");
+ CHECK_NE(NULL, global_context);
+ CHECK(HasWeakEdge(global_context));
+}
+
+
+TEST(SfiAndJsFunctionWeakRefs) {
+ v8::HandleScope scope;
+ LocalContext env;
+
+ CompileRun(
+ "fun = (function (x) { return function () { return x + 1; } })(1);");
+ const v8::HeapSnapshot* snapshot =
+ v8::HeapProfiler::TakeSnapshot(v8_str("fun"));
+ const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+ CHECK_NE(NULL, global);
+ const v8::HeapGraphNode* fun =
+ GetProperty(global, v8::HeapGraphEdge::kShortcut, "fun");
+ CHECK(HasWeakEdge(fun));
+ const v8::HeapGraphNode* shared =
+ GetProperty(fun, v8::HeapGraphEdge::kInternal, "shared");
+ CHECK(HasWeakEdge(shared));
+}
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 71f9ac5b3..014eefb5b 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -1290,7 +1290,8 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
CHECK(old_capacity == new_capacity);
}
-
+// This just checks the contract of the IdleNotification() function,
+// and does not verify that it does reasonable work.
TEST(IdleNotificationAdvancesIncrementalMarking) {
if (!FLAG_incremental_marking || !FLAG_incremental_marking_steps) return;
InitializeVM();
@@ -1312,8 +1313,8 @@ TEST(IdleNotificationAdvancesIncrementalMarking) {
CompileRun(source);
}
intptr_t old_size = HEAP->SizeOfObjects();
- bool no_idle_work = v8::V8::IdleNotification();
- while (!v8::V8::IdleNotification()) ;
+ bool no_idle_work = v8::V8::IdleNotification(900);
+ while (!v8::V8::IdleNotification(900)) ;
intptr_t new_size = HEAP->SizeOfObjects();
- CHECK(no_idle_work || new_size < 3 * old_size / 4);
+ CHECK(no_idle_work || new_size < old_size);
}
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 6379f3e31..3c66c4c80 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -27,6 +27,14 @@
#include <stdlib.h>
+#ifdef __linux__
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <errno.h>
+#endif
+
#include "v8.h"
#include "global-handles.h"
@@ -444,3 +452,87 @@ TEST(EmptyObjectGroups) {
global_handles->AddImplicitReferences(
Handle<HeapObject>::cast(object).location(), NULL, 0);
}
+
+
+// Here is a memory use test that uses /proc, and is therefore Linux-only. We
+// do not care how much memory the simulator uses, since it is only there for
+// debugging purposes.
+#if defined(__linux__) && !defined(USE_SIMULATOR)
+
+
+static uintptr_t ReadLong(char* buffer, intptr_t* position, int base) {
+ char* end_address = buffer + *position;
+ uintptr_t result = strtoul(buffer + *position, &end_address, base);
+ CHECK(result != ULONG_MAX || errno != ERANGE);
+ CHECK(end_address > buffer + *position);
+ *position = end_address - buffer;
+ return result;
+}
+
+
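+// Sums the sizes of the private anonymous (inode 0) mappings listed in
+// /proc/self/maps, where each line looks roughly like
+// "08048000-08056000 r-xp 00000000 03:0c 64593 /usr/sbin/foo".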
+static intptr_t MemoryInUse() {
+ intptr_t memory_use = 0;
+
+ int fd = open("/proc/self/maps", O_RDONLY);
+ if (fd < 0) return -1;
+
+ const int kBufSize = 10000;
+ char buffer[kBufSize];
+ int length = read(fd, buffer, kBufSize);
+ intptr_t line_start = 0;
+ CHECK_LT(length, kBufSize);  // If this fails, make the buffer bigger.
+ CHECK_GT(length, 0); // We have to find some data in the file.
+ while (line_start < length) {
+ if (buffer[line_start] == '\n') {
+ line_start++;
+ continue;
+ }
+ intptr_t position = line_start;
+ uintptr_t start = ReadLong(buffer, &position, 16);
+ CHECK_EQ(buffer[position++], '-');
+ uintptr_t end = ReadLong(buffer, &position, 16);
+ CHECK_EQ(buffer[position++], ' ');
+ CHECK(buffer[position] == '-' || buffer[position] == 'r');
+ bool read_permission = (buffer[position++] == 'r');
+ CHECK(buffer[position] == '-' || buffer[position] == 'w');
+ bool write_permission = (buffer[position++] == 'w');
+ CHECK(buffer[position] == '-' || buffer[position] == 'x');
+ bool execute_permission = (buffer[position++] == 'x');
+ CHECK(buffer[position] == '-' || buffer[position] == 'p');
+ bool private_mapping = (buffer[position++] == 'p');
+ CHECK_EQ(buffer[position++], ' ');
+ uintptr_t offset = ReadLong(buffer, &position, 16);
+ USE(offset);
+ CHECK_EQ(buffer[position++], ' ');
+ uintptr_t major = ReadLong(buffer, &position, 16);
+ USE(major);
+ CHECK_EQ(buffer[position++], ':');
+ uintptr_t minor = ReadLong(buffer, &position, 16);
+ USE(minor);
+ CHECK_EQ(buffer[position++], ' ');
+ uintptr_t inode = ReadLong(buffer, &position, 10);
+ while (position < length && buffer[position] != '\n') position++;
+ if ((read_permission || write_permission || execute_permission) &&
+ private_mapping && inode == 0) {
+ memory_use += (end - start);
+ }
+
+ line_start = position;
+ }
+ close(fd);
+ return memory_use;
+}
+
+
+TEST(BootUpMemoryUse) {
+ intptr_t initial_memory = MemoryInUse();
+ // Only Linux has the proc filesystem, and even there it may not be
+ // mounted. If it's not available we just skip the test.
+ if (initial_memory >= 0) {
+ InitializeVM();
+ intptr_t booted_memory = MemoryInUse();
+ CHECK_LE(booted_memory - initial_memory, 16 * 1024 * 1024);
+ }
+}
+
+#endif // __linux__ and !USE_SIMULATOR
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index bd1e24e11..6f394b672 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -884,3 +884,201 @@ TEST(ScopePositions) {
CHECK_EQ(inner_scope->end_position(), kPrefixLen + kInnerLen);
}
}
+
+
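+// Runs both the preparser and the full parser over |source| and checks that
+// they agree: either both succeed, or both fail with the same error message.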
+void TestParserSync(i::Handle<i::String> source, int flags) {
+ uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
+ bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
+
+ // Preparse the data.
+ i::CompleteParserRecorder log;
+ i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::GenericStringUC16CharacterStream stream(source, 0, source->length());
+ scanner.SetHarmonyScoping(harmony_scoping);
+ scanner.Initialize(&stream);
+ v8::preparser::PreParser::PreParseResult result =
+ v8::preparser::PreParser::PreParseProgram(
+ &scanner, &log, flags, stack_limit);
+ CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ i::ScriptDataImpl data(log.ExtractData());
+
+ // Parse the data
+ i::Handle<i::Script> script = FACTORY->NewScript(source);
+ bool save_harmony_scoping = i::FLAG_harmony_scoping;
+ i::FLAG_harmony_scoping = harmony_scoping;
+ i::Parser parser(script, flags, NULL, NULL);
+ i::CompilationInfo info(script);
+ info.MarkAsGlobal();
+ i::FunctionLiteral* function = parser.ParseProgram(&info);
+ i::FLAG_harmony_scoping = save_harmony_scoping;
+
+ i::String* type_string = NULL;
+ if (function == NULL) {
+ // Extract exception from the parser.
+ i::Handle<i::String> type_symbol = FACTORY->LookupAsciiSymbol("type");
+ CHECK(i::Isolate::Current()->has_pending_exception());
+ i::MaybeObject* maybe_object = i::Isolate::Current()->pending_exception();
+ i::JSObject* exception = NULL;
+ CHECK(maybe_object->To(&exception));
+
+ // Get the type string.
+ maybe_object = exception->GetProperty(*type_symbol);
+ CHECK(maybe_object->To(&type_string));
+ }
+
+ // Check that preparsing fails iff parsing fails.
+ if (data.has_error() && function != NULL) {
+ i::OS::Print(
+ "Preparser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the parser succeeded",
+ *source->ToCString(), data.BuildMessage());
+ CHECK(false);
+ } else if (!data.has_error() && function == NULL) {
+ i::OS::Print(
+ "Parser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the preparser succeeded",
+ *source->ToCString(), *type_string->ToCString());
+ CHECK(false);
+ }
+
+ // Check that preparser and parser produce the same error.
+ if (function == NULL) {
+ if (!type_string->IsEqualTo(i::CStrVector(data.BuildMessage()))) {
+ i::OS::Print(
+ "Expected parser and preparser to produce the same error on:\n"
+ "\t%s\n"
+ "However, found the following error messages\n"
+ "\tparser: %s\n"
+ "\tpreparser: %s\n",
+ *source->ToCString(), *type_string->ToCString(), data.BuildMessage());
+ CHECK(false);
+ }
+ }
+}
+
+
+void TestParserSyncWithFlags(i::Handle<i::String> source) {
+ static const int kFlagsCount = 6;
+ const int flags[kFlagsCount] = {
+ i::kNoParsingFlags | i::CLASSIC_MODE,
+ i::kNoParsingFlags | i::STRICT_MODE,
+ i::kNoParsingFlags | i::EXTENDED_MODE,
+ i::kAllowLazy | i::CLASSIC_MODE,
+ i::kAllowLazy | i::STRICT_MODE,
+ i::kAllowLazy | i::EXTENDED_MODE
+ };
+
+ for (int k = 0; k < kFlagsCount; ++k) {
+ TestParserSync(source, flags[k]);
+ }
+}
+
+
+TEST(ParserSync) {
+ const char* context_data[][2] = {
+ { "", "" },
+ { "{", "}" },
+ { "if (true) ", " else {}" },
+ { "if (true) {} else ", "" },
+ { "if (true) ", "" },
+ { "do ", " while (false)" },
+ { "while (false) ", "" },
+ { "for (;;) ", "" },
+ { "with ({})", "" },
+ { "switch (12) { case 12: ", "}" },
+ { "switch (12) { default: ", "}" },
+ { "label2: ", "" },
+ { NULL, NULL }
+ };
+
+ const char* statement_data[] = {
+ "{}",
+ "var x",
+ "var x = 1",
+ "const x",
+ "const x = 1",
+ ";",
+ "12",
+ "if (false) {} else ;",
+ "if (false) {} else {}",
+ "if (false) {} else 12",
+ "if (false) ;"
+ "if (false) {}",
+ "if (false) 12",
+ "do {} while (false)",
+ "for (;;) ;",
+ "for (;;) {}",
+ "for (;;) 12",
+ "continue",
+ "continue label",
+ "continue\nlabel",
+ "break",
+ "break label",
+ "break\nlabel",
+ "return",
+ "return 12",
+ "return\n12",
+ "with ({}) ;",
+ "with ({}) {}",
+ "with ({}) 12",
+ "switch ({}) { default: }"
+ "label3: "
+ "throw",
+ "throw 12",
+ "throw\n12",
+ "try {} catch(e) {}",
+ "try {} finally {}",
+ "try {} catch(e) {} finally {}",
+ "debugger",
+ NULL
+ };
+
+ const char* termination_data[] = {
+ "",
+ ";",
+ "\n",
+ ";\n",
+ "\n;",
+ NULL
+ };
+
+ v8::HandleScope handles;
+ v8::Persistent<v8::Context> context = v8::Context::New();
+ v8::Context::Scope context_scope(context);
+
+ int marker;
+ i::Isolate::Current()->stack_guard()->SetStackLimit(
+ reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+
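+ // Each generated program is wrapped in "label: for (;;) { ... }" below so
+ // that break, continue and "continue label" statements have a valid target.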
+ for (int i = 0; context_data[i][0] != NULL; ++i) {
+ for (int j = 0; statement_data[j] != NULL; ++j) {
+ for (int k = 0; termination_data[k] != NULL; ++k) {
+ int kPrefixLen = i::StrLength(context_data[i][0]);
+ int kStatementLen = i::StrLength(statement_data[j]);
+ int kTerminationLen = i::StrLength(termination_data[k]);
+ int kSuffixLen = i::StrLength(context_data[i][1]);
+ int kProgramSize = kPrefixLen + kStatementLen + kTerminationLen
+ + kSuffixLen + i::StrLength("label: for (;;) { }");
+
+ // Plug the source code pieces together.
+ i::Vector<char> program = i::Vector<char>::New(kProgramSize + 1);
+ int length = i::OS::SNPrintF(program,
+ "label: for (;;) { %s%s%s%s }",
+ context_data[i][0],
+ statement_data[j],
+ termination_data[k],
+ context_data[i][1]);
+ CHECK(length == kProgramSize);
+ i::Handle<i::String> source =
+ FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
+ TestParserSyncWithFlags(source);
+ }
+ }
+ }
+}
diff --git a/deps/v8/test/es5conform/es5conform.status b/deps/v8/test/es5conform/es5conform.status
index bf3ee8bb5..12ebf903e 100644
--- a/deps/v8/test/es5conform/es5conform.status
+++ b/deps/v8/test/es5conform/es5conform.status
@@ -314,8 +314,3 @@ chapter15/15.3/15.3.2/15.3.2.1/15.3.2.1-11-6-s: FAIL
# Array.prototype.reduce - null passed as thisValue to strict callbackfn
# Invalid test case: http://es5conform.codeplex.com/workitem/29085
chapter15/15.4/15.4.4/15.4.4.21/15.4.4.21-9-c-ii-4-s: FAIL
-
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/deps/v8/test/message/message.status b/deps/v8/test/message/message.status
index 70354ceec..fc2896b1c 100644
--- a/deps/v8/test/message/message.status
+++ b/deps/v8/test/message/message.status
@@ -29,10 +29,3 @@ prefix message
# All tests in the bug directory are expected to fail.
bugs: FAIL
-
-
-##############################################################################
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/deps/v8/test/mjsunit/array-construct-transition.js b/deps/v8/test/mjsunit/array-construct-transition.js
new file mode 100644
index 000000000..5865e3320
--- /dev/null
+++ b/deps/v8/test/mjsunit/array-construct-transition.js
@@ -0,0 +1,39 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --smi-only-arrays
+
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
+
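+// new Array(...) should pick the most specific elements kind its arguments
+// allow: all-smi, all-number (double), or generic object elements.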
+if (support_smi_only_arrays) {
+ var a = new Array(0, 1, 2);
+ assertTrue(%HasFastSmiOnlyElements(a));
+ var b = new Array(0.5, 1.2, 2.3);
+ assertTrue(%HasFastDoubleElements(b));
+ var c = new Array(0.5, 1.2, new Object());
+ assertTrue(%HasFastElements(c));
+}
diff --git a/deps/v8/test/mjsunit/array-literal-transitions.js b/deps/v8/test/mjsunit/array-literal-transitions.js
index 321340c4b..4ddf2cb63 100644
--- a/deps/v8/test/mjsunit/array-literal-transitions.js
+++ b/deps/v8/test/mjsunit/array-literal-transitions.js
@@ -33,7 +33,13 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-support_smi_only_arrays = %HasFastSmiOnlyElements(new Array());
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
+
+if (support_smi_only_arrays) {
+ print("Tests include smi-only arrays.");
+} else {
+ print("Tests do NOT include smi-only arrays.");
+}
// IC and Crankshaft support for smi-only elements in dynamic array literals.
function get(foo) { return foo; } // Used to generate dynamic values.
@@ -122,4 +128,76 @@ if (support_smi_only_arrays) {
}
%OptimizeFunctionOnNextCall(test_large_literal);
test_large_literal();
+
+ function deopt_array(use_literal) {
+ if (use_literal) {
+ return [.5, 3, 4];
+ } else {
+ return new Array();
+ }
+ }
+
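+ // %GetOptimizationStatus is expected to return 1 for an optimized function
+ // and 2 for an unoptimized one, so "2 != status" below means "still
+ // optimized" and "1 != status" means "deoptimized".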
+ deopt_array(false);
+ deopt_array(false);
+ deopt_array(false);
+ %OptimizeFunctionOnNextCall(deopt_array);
+ var array = deopt_array(false);
+ assertTrue(2 != %GetOptimizationStatus(deopt_array));
+ deopt_array(true);
+ assertTrue(1 != %GetOptimizationStatus(deopt_array));
+ array = deopt_array(false);
+ assertTrue(1 != %GetOptimizationStatus(deopt_array));
+
+ // Check that unexpected changes in the objects stored into the boilerplate
+ // also force a deopt.
+ function deopt_array_literal_all_smis(a) {
+ return [0, 1, a];
+ }
+
+ deopt_array_literal_all_smis(2);
+ deopt_array_literal_all_smis(3);
+ deopt_array_literal_all_smis(4);
+ array = deopt_array_literal_all_smis(4);
+ assertEquals(0, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(4, array[2]);
+ %OptimizeFunctionOnNextCall(deopt_array_literal_all_smis);
+ array = deopt_array_literal_all_smis(5);
+ array = deopt_array_literal_all_smis(6);
+ assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+ assertEquals(0, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(6, array[2]);
+
+ array = deopt_array_literal_all_smis(.5);
+ assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_smis));
+ assertEquals(0, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(.5, array[2]);
+
+ function deopt_array_literal_all_doubles(a) {
+ return [0.5, 1, a];
+ }
+
+ deopt_array_literal_all_doubles(.5);
+ deopt_array_literal_all_doubles(.5);
+ deopt_array_literal_all_doubles(.5);
+ array = deopt_array_literal_all_doubles(0.5);
+ assertEquals(0.5, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(0.5, array[2]);
+ %OptimizeFunctionOnNextCall(deopt_array_literal_all_doubles);
+ array = deopt_array_literal_all_doubles(5);
+ array = deopt_array_literal_all_doubles(6);
+ assertTrue(2 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+ assertEquals(0.5, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(6, array[2]);
+
+ var foo = new Object();
+ array = deopt_array_literal_all_doubles(foo);
+ assertTrue(1 != %GetOptimizationStatus(deopt_array_literal_all_doubles));
+ assertEquals(0.5, array[0]);
+ assertEquals(1, array[1]);
+ assertEquals(foo, array[2]);
}
diff --git a/deps/v8/test/mjsunit/compiler/regress-106351.js b/deps/v8/test/mjsunit/compiler/regress-106351.js
new file mode 100644
index 000000000..2a67a055d
--- /dev/null
+++ b/deps/v8/test/mjsunit/compiler/regress-106351.js
@@ -0,0 +1,38 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+// Test Math.round with the input reused in the same expression.
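+// Math.round(0.5) is 1 (ties round towards +Infinity), so v must be exactly
+// 0.5 both before and after the function is optimized.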
+function test(x) {
+ var v = Math.round(x) - x;
+ assertEquals(0.5, v);
+}
+
+for (var i = 0; i < 5; ++i) test(0.5);
+%OptimizeFunctionOnNextCall(test);
+test(0.5);
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index 8a8a3c743..e5b5a66c4 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -34,7 +34,7 @@
// in this test case. Depending on whether smi-only arrays are actually
// enabled, this test takes the appropriate code path to check smi-only arrays.
-support_smi_only_arrays = %HasFastSmiOnlyElements([]);
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
if (support_smi_only_arrays) {
print("Tests include smi-only arrays.");
diff --git a/deps/v8/test/mjsunit/elements-transition.js b/deps/v8/test/mjsunit/elements-transition.js
index 5f6cc4fa3..67095c456 100644
--- a/deps/v8/test/mjsunit/elements-transition.js
+++ b/deps/v8/test/mjsunit/elements-transition.js
@@ -27,7 +27,13 @@
// Flags: --allow-natives-syntax --smi-only-arrays
-support_smi_only_arrays = %HasFastSmiOnlyElements([]);
+support_smi_only_arrays = %HasFastSmiOnlyElements([1,2,3,4,5,6,7,8,9,10]);
+
+if (support_smi_only_arrays) {
+ print("Tests include smi-only arrays.");
+} else {
+ print("Tests do NOT include smi-only arrays.");
+}
if (support_smi_only_arrays) {
function test(test_double, test_object, set, length) {
@@ -104,4 +110,4 @@ if (support_smi_only_arrays) {
assertEquals(1, b[0]);
} else {
print("Test skipped because smi only arrays are not supported.");
-} \ No newline at end of file
+}
diff --git a/deps/v8/test/mjsunit/harmony/block-const-assign.js b/deps/v8/test/mjsunit/harmony/block-const-assign.js
new file mode 100644
index 000000000..8297a558a
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/block-const-assign.js
@@ -0,0 +1,131 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+// Test that we throw early syntax errors in harmony mode
+// when using an immutable binding in an assignment or with
+// prefix/postfix decrement/increment operators.
+// TODO(ES6): properly activate extended mode
+"use strict";
+
+
+// Function local const.
+function constDecl0(use) {
+ return "(function() { const constvar = 1; " + use + "; });";
+}
+
+
+function constDecl1(use) {
+ return "(function() { " + use + "; const constvar = 1; });";
+}
+
+
+// Function local const, assign from eval.
+function constDecl2(use) {
+ use = "eval('(function() { " + use + " })')";
+ return "(function() { const constvar = 1; " + use + "; })();";
+}
+
+
+function constDecl3(use) {
+ use = "eval('(function() { " + use + " })')";
+ return "(function() { " + use + "; const constvar = 1; })();";
+}
+
+
+// Block local const.
+function constDecl4(use) {
+ return "(function() { { const constvar = 1; " + use + "; } });";
+}
+
+
+function constDecl5(use) {
+ return "(function() { { " + use + "; const constvar = 1; } });";
+}
+
+
+// Block local const, assign from eval.
+function constDecl6(use) {
+ use = "eval('(function() {" + use + "})')";
+ return "(function() { { const constvar = 1; " + use + "; } })();";
+}
+
+
+function constDecl7(use) {
+ use = "eval('(function() {" + use + "})')";
+ return "(function() { { " + use + "; const constvar = 1; } })();";
+}
+
+
+// Function expression name.
+function constDecl8(use) {
+ return "(function constvar() { " + use + "; });";
+}
+
+
+// Function expression name, assign from eval.
+function constDecl9(use) {
+ use = "eval('(function(){" + use + "})')";
+ return "(function constvar() { " + use + "; })();";
+}
+
+let decls = [ constDecl0,
+ constDecl1,
+ constDecl2,
+ constDecl3,
+ constDecl4,
+ constDecl5,
+ constDecl6,
+ constDecl7,
+ constDecl8,
+ constDecl9
+ ];
+let uses = [ 'constvar = 1;',
+ 'constvar += 1;',
+ '++constvar;',
+ 'constvar++;'
+ ];
+
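+// For example, constDecl0 combined with the first use builds
+// "(function() { const constvar = 1; constvar = 1; });", which must throw a
+// SyntaxError as soon as eval compiles it, before the function could run.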
+function Test(d,u) {
+ 'use strict';
+ try {
+ print(d(u));
+ eval(d(u));
+ } catch (e) {
+ assertInstanceof(e, SyntaxError);
+ assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
+ return;
+ }
+ assertUnreachable();
+}
+
+for (var d = 0; d < decls.length; ++d) {
+ for (var u = 0; u < uses.length; ++u) {
+ Test(decls[d], uses[u]);
+ }
+}
diff --git a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
index ba5bc0d4a..1db1792ea 100644
--- a/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
+++ b/deps/v8/test/mjsunit/harmony/block-let-crankshaft.js
@@ -30,8 +30,204 @@
// TODO(ES6): properly activate extended mode
"use strict";
+// Check that the following functions are optimizable.
+var functions = [ f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11, f12, f13, f14,
+ f15, f16, f17, f18, f19, f20, f21, f22, f23 ];
+
+for (var i = 0; i < functions.length; ++i) {
+ var func = functions[i];
+ print("Testing:");
+ print(func);
+ for (var j = 0; j < 10; ++j) {
+ func(12);
+ }
+ %OptimizeFunctionOnNextCall(func);
+ func(12);
+ assertTrue(%GetOptimizationStatus(func) != 2);
+}
+
+function f1() { }
+
+function f2(x) { }
+
+function f3() {
+ let x;
+}
+
+function f4() {
+ function foo() {
+ }
+}
+
+function f5() {
+ let x = 1;
+}
+
+function f6() {
+ const x = 1;
+}
+
+function f7(x) {
+ return x;
+}
+
+function f8() {
+ let x;
+ return x;
+}
+
+function f9() {
+ function x() {
+ }
+ return x;
+}
+
+function f10(x) {
+ x = 1;
+}
+
+function f11() {
+ let x;
+ x = 1;
+}
+
+function f12() {
+ function x() {};
+ x = 1;
+}
+
+function f13(x) {
+ (function() { x; });
+}
+
+function f14() {
+ let x;
+ (function() { x; });
+}
+
+function f15() {
+ function x() {
+ }
+ (function() { x; });
+}
+
+function f16() {
+ let x = 1;
+ (function() { x; });
+}
+
+function f17() {
+ const x = 1;
+ (function() { x; });
+}
+
+function f18(x) {
+ return x;
+ (function() { x; });
+}
+
+function f19() {
+ let x;
+ return x;
+ (function() { x; });
+}
+
+function f20() {
+ function x() {
+ }
+ return x;
+ (function() { x; });
+}
+
+function f21(x) {
+ x = 1;
+ (function() { x; });
+}
+
+function f22() {
+ let x;
+ x = 1;
+ (function() { x; });
+}
+
+function f23() {
+ function x() { }
+ x = 1;
+ (function() { x; });
+}
+
+
// Test that temporal dead zone semantics for function and block scoped
-// ket bindings are handled by the optimizing compiler.
+// let bindings are handled by the optimizing compiler.
+
+function TestFunctionLocal(s) {
+ 'use strict';
+ var func = eval("(function baz(){" + s + "; })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestFunctionContext(s) {
+ 'use strict';
+ var func = eval("(function baz(){ " + s + "; (function() { x; }); })");
+ print("Testing:");
+ print(func);
+ for (var i = 0; i < 5; ++i) {
+ print(i);
+ try {
+ func();
+ assertUnreachable();
+ } catch (e) {
+ assertInstanceof(e, ReferenceError);
+ }
+ }
+ print("optimize");
+ %OptimizeFunctionOnNextCall(func);
+ try {
+ print("call");
+ func();
+ assertUnreachable();
+ } catch (e) {
+ print("catch");
+ assertInstanceof(e, ReferenceError);
+ }
+}
+
+function TestAll(s) {
+ TestFunctionLocal(s);
+ TestFunctionContext(s);
+}
+
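+// Each use below reads the binding inside its temporal dead zone, before the
+// let/const initializer has run, so unoptimized and optimized code alike must
+// throw a ReferenceError.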
+// Use before initialization in declaration statement.
+TestAll('let x = x + 1');
+TestAll('let x = x += 1');
+TestAll('let x = x++');
+TestAll('let x = ++x');
+TestAll('const x = x + 1');
+
+// Use before initialization in prior statement.
+TestAll('x + 1; let x;');
+TestAll('x = 1; let x;');
+TestAll('x += 1; let x;');
+TestAll('++x; let x;');
+TestAll('x++; let x;');
+TestAll('let y = x; const x = 1;');
+
function f(x, b) {
let y = (b ? y : x) + 42;
diff --git a/deps/v8/test/mjsunit/math-pow.js b/deps/v8/test/mjsunit/math-pow.js
index 30d0cbdce..fb5f8a1f9 100644
--- a/deps/v8/test/mjsunit/math-pow.js
+++ b/deps/v8/test/mjsunit/math-pow.js
@@ -25,118 +25,149 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
// Tests the special cases specified by ES 15.8.2.13
-// Simple sanity check
-assertEquals(4, Math.pow(2, 2));
-assertEquals(2147483648, Math.pow(2, 31));
-assertEquals(0.25, Math.pow(2, -2));
-assertEquals(0.0625, Math.pow(2, -4));
-assertEquals(1, Math.pow(1, 100));
-assertEquals(0, Math.pow(0, 1000));
-
-// Spec tests
-assertEquals(NaN, Math.pow(2, NaN));
-assertEquals(NaN, Math.pow(+0, NaN));
-assertEquals(NaN, Math.pow(-0, NaN));
-assertEquals(NaN, Math.pow(Infinity, NaN));
-assertEquals(NaN, Math.pow(-Infinity, NaN));
-
-assertEquals(1, Math.pow(NaN, +0));
-assertEquals(1, Math.pow(NaN, -0));
-
-assertEquals(NaN, Math.pow(NaN, NaN));
-assertEquals(NaN, Math.pow(NaN, 2.2));
-assertEquals(NaN, Math.pow(NaN, 1));
-assertEquals(NaN, Math.pow(NaN, -1));
-assertEquals(NaN, Math.pow(NaN, -2.2));
-assertEquals(NaN, Math.pow(NaN, Infinity));
-assertEquals(NaN, Math.pow(NaN, -Infinity));
-
-assertEquals(Infinity, Math.pow(1.1, Infinity));
-assertEquals(Infinity, Math.pow(-1.1, Infinity));
-assertEquals(Infinity, Math.pow(2, Infinity));
-assertEquals(Infinity, Math.pow(-2, Infinity));
-
-// Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity
-assertEquals(+Infinity, 1/Math.pow(1.1, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(-1.1, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(2, -Infinity));
-assertEquals(+Infinity, 1/Math.pow(-2, -Infinity));
-
-assertEquals(NaN, Math.pow(1, Infinity));
-assertEquals(NaN, Math.pow(1, -Infinity));
-assertEquals(NaN, Math.pow(-1, Infinity));
-assertEquals(NaN, Math.pow(-1, -Infinity));
-
-assertEquals(+0, Math.pow(0.1, Infinity));
-assertEquals(+0, Math.pow(-0.1, Infinity));
-assertEquals(+0, Math.pow(0.999, Infinity));
-assertEquals(+0, Math.pow(-0.999, Infinity));
-
-assertEquals(Infinity, Math.pow(0.1, -Infinity));
-assertEquals(Infinity, Math.pow(-0.1, -Infinity));
-assertEquals(Infinity, Math.pow(0.999, -Infinity));
-assertEquals(Infinity, Math.pow(-0.999, -Infinity));
-
-assertEquals(Infinity, Math.pow(Infinity, 0.1));
-assertEquals(Infinity, Math.pow(Infinity, 2));
-
-assertEquals(+Infinity, 1/Math.pow(Infinity, -0.1));
-assertEquals(+Infinity, 1/Math.pow(Infinity, -2));
-
-assertEquals(-Infinity, Math.pow(-Infinity, 3));
-assertEquals(-Infinity, Math.pow(-Infinity, 13));
-
-assertEquals(Infinity, Math.pow(-Infinity, 3.1));
-assertEquals(Infinity, Math.pow(-Infinity, 2));
-
-assertEquals(-Infinity, 1/Math.pow(-Infinity, -3));
-assertEquals(-Infinity, 1/Math.pow(-Infinity, -13));
-
-assertEquals(+Infinity, 1/Math.pow(-Infinity, -3.1));
-assertEquals(+Infinity, 1/Math.pow(-Infinity, -2));
-
-assertEquals(+Infinity, 1/Math.pow(+0, 1.1));
-assertEquals(+Infinity, 1/Math.pow(+0, 2));
-
-assertEquals(Infinity, Math.pow(+0, -1.1));
-assertEquals(Infinity, Math.pow(+0, -2));
-
-assertEquals(-Infinity, 1/Math.pow(-0, 3));
-assertEquals(-Infinity, 1/Math.pow(-0, 13));
-
-assertEquals(+Infinity, 1/Math.pow(-0, 3.1));
-assertEquals(+Infinity, 1/Math.pow(-0, 2));
-
-assertEquals(-Infinity, Math.pow(-0, -3));
-assertEquals(-Infinity, Math.pow(-0, -13));
-
-assertEquals(Infinity, Math.pow(-0, -3.1));
-assertEquals(Infinity, Math.pow(-0, -2));
-
-assertEquals(NaN, Math.pow(-0.00001, 1.1));
-assertEquals(NaN, Math.pow(-0.00001, -1.1));
-assertEquals(NaN, Math.pow(-1.1, 1.1));
-assertEquals(NaN, Math.pow(-1.1, -1.1));
-assertEquals(NaN, Math.pow(-2, 1.1));
-assertEquals(NaN, Math.pow(-2, -1.1));
-assertEquals(NaN, Math.pow(-1000, 1.1));
-assertEquals(NaN, Math.pow(-1000, -1.1));
-
-assertEquals(+Infinity, 1/Math.pow(-0, 0.5));
-assertEquals(+Infinity, 1/Math.pow(-0, 0.6));
-assertEquals(-Infinity, 1/Math.pow(-0, 1));
-assertEquals(-Infinity, 1/Math.pow(-0, 10000000001));
-
-assertEquals(+Infinity, Math.pow(-0, -0.5));
-assertEquals(+Infinity, Math.pow(-0, -0.6));
-assertEquals(-Infinity, Math.pow(-0, -1));
-assertEquals(-Infinity, Math.pow(-0, -10000000001));
-
-
-
-// Tests from Sputnik S8.5_A13_T1.
-assertTrue((1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
-assertTrue((1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
-assertTrue((-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
+function test() {
+ // Simple sanity check
+ assertEquals(4, Math.pow(2, 2));
+ assertEquals(2147483648, Math.pow(2, 31));
+ assertEquals(0.25, Math.pow(2, -2));
+ assertEquals(0.0625, Math.pow(2, -4));
+ assertEquals(1, Math.pow(1, 100));
+ assertEquals(0, Math.pow(0, 1000));
+
+ // Spec tests
+ assertEquals(NaN, Math.pow(2, NaN));
+ assertEquals(NaN, Math.pow(+0, NaN));
+ assertEquals(NaN, Math.pow(-0, NaN));
+ assertEquals(NaN, Math.pow(Infinity, NaN));
+ assertEquals(NaN, Math.pow(-Infinity, NaN));
+
+ assertEquals(1, Math.pow(NaN, +0));
+ assertEquals(1, Math.pow(NaN, -0));
+
+ assertEquals(NaN, Math.pow(NaN, NaN));
+ assertEquals(NaN, Math.pow(NaN, 2.2));
+ assertEquals(NaN, Math.pow(NaN, 1));
+ assertEquals(NaN, Math.pow(NaN, -1));
+ assertEquals(NaN, Math.pow(NaN, -2.2));
+ assertEquals(NaN, Math.pow(NaN, Infinity));
+ assertEquals(NaN, Math.pow(NaN, -Infinity));
+
+ assertEquals(Infinity, Math.pow(1.1, Infinity));
+ assertEquals(Infinity, Math.pow(-1.1, Infinity));
+ assertEquals(Infinity, Math.pow(2, Infinity));
+ assertEquals(Infinity, Math.pow(-2, Infinity));
+
+ // Because +0 == -0, we need to compare 1/{+,-}0 to {+,-}Infinity
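+ // (+0 === -0 is true, but 1/+0 === Infinity while 1/-0 === -Infinity, so
+ // dividing by the zero reveals its sign.)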
+ assertEquals(+Infinity, 1/Math.pow(1.1, -Infinity));
+ assertEquals(+Infinity, 1/Math.pow(-1.1, -Infinity));
+ assertEquals(+Infinity, 1/Math.pow(2, -Infinity));
+ assertEquals(+Infinity, 1/Math.pow(-2, -Infinity));
+
+ assertEquals(NaN, Math.pow(1, Infinity));
+ assertEquals(NaN, Math.pow(1, -Infinity));
+ assertEquals(NaN, Math.pow(-1, Infinity));
+ assertEquals(NaN, Math.pow(-1, -Infinity));
+
+ assertEquals(+0, Math.pow(0.1, Infinity));
+ assertEquals(+0, Math.pow(-0.1, Infinity));
+ assertEquals(+0, Math.pow(0.999, Infinity));
+ assertEquals(+0, Math.pow(-0.999, Infinity));
+
+ assertEquals(Infinity, Math.pow(0.1, -Infinity));
+ assertEquals(Infinity, Math.pow(-0.1, -Infinity));
+ assertEquals(Infinity, Math.pow(0.999, -Infinity));
+ assertEquals(Infinity, Math.pow(-0.999, -Infinity));
+
+ assertEquals(Infinity, Math.pow(Infinity, 0.1));
+ assertEquals(Infinity, Math.pow(Infinity, 2));
+
+ assertEquals(+Infinity, 1/Math.pow(Infinity, -0.1));
+ assertEquals(+Infinity, 1/Math.pow(Infinity, -2));
+
+ assertEquals(-Infinity, Math.pow(-Infinity, 3));
+ assertEquals(-Infinity, Math.pow(-Infinity, 13));
+
+ assertEquals(Infinity, Math.pow(-Infinity, 3.1));
+ assertEquals(Infinity, Math.pow(-Infinity, 2));
+
+ assertEquals(-Infinity, 1/Math.pow(-Infinity, -3));
+ assertEquals(-Infinity, 1/Math.pow(-Infinity, -13));
+
+ assertEquals(+Infinity, 1/Math.pow(-Infinity, -3.1));
+ assertEquals(+Infinity, 1/Math.pow(-Infinity, -2));
+
+ assertEquals(+Infinity, 1/Math.pow(+0, 1.1));
+ assertEquals(+Infinity, 1/Math.pow(+0, 2));
+
+ assertEquals(Infinity, Math.pow(+0, -1.1));
+ assertEquals(Infinity, Math.pow(+0, -2));
+
+ assertEquals(-Infinity, 1/Math.pow(-0, 3));
+ assertEquals(-Infinity, 1/Math.pow(-0, 13));
+
+ assertEquals(+Infinity, 1/Math.pow(-0, 3.1));
+ assertEquals(+Infinity, 1/Math.pow(-0, 2));
+
+ assertEquals(-Infinity, Math.pow(-0, -3));
+ assertEquals(-Infinity, Math.pow(-0, -13));
+
+ assertEquals(Infinity, Math.pow(-0, -3.1));
+ assertEquals(Infinity, Math.pow(-0, -2));
+
+ assertEquals(NaN, Math.pow(-0.00001, 1.1));
+ assertEquals(NaN, Math.pow(-0.00001, -1.1));
+ assertEquals(NaN, Math.pow(-1.1, 1.1));
+ assertEquals(NaN, Math.pow(-1.1, -1.1));
+ assertEquals(NaN, Math.pow(-2, 1.1));
+ assertEquals(NaN, Math.pow(-2, -1.1));
+ assertEquals(NaN, Math.pow(-1000, 1.1));
+ assertEquals(NaN, Math.pow(-1000, -1.1));
+
+ assertEquals(+Infinity, 1/Math.pow(-0, 0.5));
+ assertEquals(+Infinity, 1/Math.pow(-0, 0.6));
+ assertEquals(-Infinity, 1/Math.pow(-0, 1));
+ assertEquals(-Infinity, 1/Math.pow(-0, 10000000001));
+
+ assertEquals(+Infinity, Math.pow(-0, -0.5));
+ assertEquals(+Infinity, Math.pow(-0, -0.6));
+ assertEquals(-Infinity, Math.pow(-0, -1));
+ assertEquals(-Infinity, Math.pow(-0, -10000000001));
+
+ assertEquals(4, Math.pow(16, 0.5));
+ assertEquals(NaN, Math.pow(-16, 0.5));
+ assertEquals(0.25, Math.pow(16, -0.5));
+ assertEquals(NaN, Math.pow(-16, -0.5));
+
+ // Test detecting an integer-valued double exponent (Math.sqrt(9) is the
+ // double 3).
+ assertEquals(8, Math.pow(2, Math.sqrt(9)));
+
+ // Tests from Mozilla 15.8.2.13.
+ assertEquals(2, Math.pow.length);
+ assertEquals(NaN, Math.pow());
+ assertEquals(1, Math.pow(null, null));
+ assertEquals(NaN, Math.pow(void 0, void 0));
+ assertEquals(1, Math.pow(true, false));
+ assertEquals(0, Math.pow(false, true));
+ assertEquals(Infinity, Math.pow(-Infinity, Infinity));
+ assertEquals(0, Math.pow(-Infinity, -Infinity));
+ assertEquals(1, Math.pow(0, 0));
+ assertEquals(0, Math.pow(0, Infinity));
+ assertEquals(NaN, Math.pow(NaN, 0.5));
+ assertEquals(NaN, Math.pow(NaN, -0.5));
+
+ // Tests from Sputnik S8.5_A13_T1.
+ assertTrue(
+ (1*((Math.pow(2,53))-1)*(Math.pow(2,-1074))) === 4.4501477170144023e-308);
+ assertTrue(
+ (1*(Math.pow(2,52))*(Math.pow(2,-1074))) === 2.2250738585072014e-308);
+ assertTrue(
+ (-1*(Math.pow(2,52))*(Math.pow(2,-1074))) === -2.2250738585072014e-308);
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test(); \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index b77ebca66..a43d3ea4e 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -34,10 +34,6 @@ bugs: FAIL
# Fails.
regress/regress-1119: FAIL
-#############################################################################
-# Fails due to r10102 which reverts precise stepping on the 3.6 branch.
-debug-step-2: FAIL
-
##############################################################################
# Issue 1845: http://code.google.com/p/v8/issues/detail?id=1845
@@ -54,16 +50,16 @@ regress/regress-create-exception: PASS, SKIP if $mode == debug
##############################################################################
# This one uses a built-in that's only present in debug mode. It takes
-# too long to run in debug mode on ARM.
-fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm)
+# too long to run in debug mode on ARM and MIPS.
+fuzz-natives: PASS, SKIP if ($mode == release || $arch == arm || $arch == mips)
big-object-literal: PASS, SKIP if ($arch == arm)
# Issue 488: this test sometimes times out.
array-constructor: PASS || TIMEOUT
-# Very slow on ARM, contains no architecture dependent code.
-unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm)
+# Very slow on ARM and MIPS, contains no architecture dependent code.
+unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == mips)
# Skip long running test in debug and allow it to timeout in release mode.
regress/regress-524: (PASS || TIMEOUT), SKIP if $mode == debug
@@ -126,11 +122,23 @@ regress/regress-1132: SKIP
##############################################################################
[ $arch == mips ]
-# Run those tests, but expect them to time out.
-array-sort: PASS || TIMEOUT
+
+# Slow tests which time out in debug mode.
+try: PASS, SKIP if $mode == debug
+debug-scripts-request: PASS, SKIP if $mode == debug
+array-constructor: PASS, SKIP if $mode == debug
+
+# Times out often in release mode on MIPS.
+compiler/regress-stacktrace-methods: PASS, PASS || TIMEOUT if $mode == release
+array-splice: PASS || TIMEOUT
+
+# Long running test.
mirror-object: PASS || TIMEOUT
+string-indexof-2: PASS || TIMEOUT
-# Skip long-running tests.
+# BUG(3251035): Timeouts in long looping crankshaft optimization
+# tests. Skipping because having them time out takes too long on the
+# buildbot.
compiler/alloc-number: SKIP
compiler/array-length: SKIP
compiler/assignment-deopt: SKIP
@@ -155,12 +163,8 @@ regress/regress-634: SKIP
regress/regress-create-exception: SKIP
regress/regress-3218915: SKIP
regress/regress-3247124: SKIP
-regress/regress-1132: SKIP
-regress/regress-1257: SKIP
-regress/regress-91008: SKIP
-##############################################################################
-[ $isolates ]
-# d8-os writes temporary files that might interfere with each other when running
-# in multiple threads. Skip this if running with isolates testing.
-d8-os: SKIP
+# Requires a bigger stack size in Genesis, and if the stack size is
+# increased, the test takes too much time to run. However, the problem
+# the test covers should be platform-independent.
+regress/regress-1132: SKIP
diff --git a/deps/v8/test/mjsunit/regress/regress-397.js b/deps/v8/test/mjsunit/regress/regress-397.js
index 111f4a6e5..0e4143d03 100644
--- a/deps/v8/test/mjsunit/regress/regress-397.js
+++ b/deps/v8/test/mjsunit/regress/regress-397.js
@@ -25,10 +25,19 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Flags: --allow-natives-syntax
// See http://code.google.com/p/v8/issues/detail?id=397
-assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
-assertEquals(0, Math.pow(Infinity, -0.5));
-assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
-assertEquals(0, Math.pow(-Infinity, -0.5));
+function test() {
+ assertEquals("Infinity", String(Math.pow(Infinity, 0.5)));
+ assertEquals(0, Math.pow(Infinity, -0.5));
+
+ assertEquals("Infinity", String(Math.pow(-Infinity, 0.5)));
+ assertEquals(0, Math.pow(-Infinity, -0.5));
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
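The four assertions mirror rows of the ES5 15.8.2.13 special-value table: once the exponent is finite, non-zero, and not an odd integer, an infinite base always yields +Infinity for a positive exponent and +0 for a negative one, regardless of the base's sign. A hedged sketch spelling out just those rows, in plain table-driven JS with no natives required:

// ES5 15.8.2.13: with an infinite base and a non-integer exponent, the
// sign of the base is irrelevant; only the exponent's sign matters.
var cases = [
  [ Infinity,  0.5, Infinity],  // +Inf base, positive exponent -> +Inf
  [ Infinity, -0.5, 0],         // +Inf base, negative exponent -> +0
  [-Infinity,  0.5, Infinity],  // 0.5 is not an odd integer -> +Inf
  [-Infinity, -0.5, 0]          // likewise -> +0
];
for (var i = 0; i < cases.length; i++) {
  var got = Math.pow(cases[i][0], cases[i][1]);
  if (got !== cases[i][2]) throw new Error("case " + i + ": got " + got);
}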
diff --git a/deps/v8/test/mjsunit/regress/regress-97116.js b/deps/v8/test/mjsunit/regress/regress-97116.js
new file mode 100644
index 000000000..b858ca5e8
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-97116.js
@@ -0,0 +1,50 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+// Check that we are not flushing code for inlined functions that
+// have a pending lazy deoptimization on the stack.
+
+function deopt() {
+ try { } catch (e) { } // Avoid inlining.
+ %DeoptimizeFunction(outer);
+ for (var i = 0; i < 10; i++) gc(); // Force code flushing.
+}
+
+function outer(should_deopt) {
+ inner(should_deopt);
+}
+
+function inner(should_deopt) {
+ if (should_deopt) deopt();
+}
+
+outer(false);
+outer(false);
+%OptimizeFunctionOnNextCall(outer);
+outer(true);
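The empty try { } catch (e) { } in deopt() is load-bearing: crankshaft of this era declined to inline any function containing try/catch (an assumption consistent with the "Avoid inlining" comment above), so deopt() stays out of the inlined outer()/inner() pair and %DeoptimizeFunction hits a caller that is still live on the stack, i.e. a lazy deoptimization. A reduced sketch of the same trick, runnable with d8 --allow-natives-syntax:

// Force a lazy deopt of an optimized caller from inside a callee that
// must not be inlined into it.
function victim() {
  helper();
  return 42;  // Runs in the freshly deoptimized frame after helper().
}

function helper() {
  try { } catch (e) { }         // Blocks inlining of helper() itself.
  %DeoptimizeFunction(victim);  // victim is on the stack: lazy deopt.
}

victim();
victim();
%OptimizeFunctionOnNextCall(victim);
if (victim() !== 42) throw new Error("lazy deopt corrupted the frame");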
diff --git a/deps/v8/test/mjsunit/string-external-cached.js b/deps/v8/test/mjsunit/string-external-cached.js
index 12312ac76..0a95830d0 100644
--- a/deps/v8/test/mjsunit/string-external-cached.js
+++ b/deps/v8/test/mjsunit/string-external-cached.js
@@ -59,7 +59,7 @@ function test() {
} catch (ex) { }
assertEquals("1", charat_short.charAt(1));
- // Test regexp.
+ // Test regexp and short substrings.
var re = /(A|B)/;
var rere = /(T.{1,2}B)/;
var ascii = "ABCDEFGHIJKLMNOPQRST";
@@ -81,6 +81,10 @@ function test() {
assertEquals(["A", "A"], re.exec(twobyte));
assertEquals(["B", "B"], re.exec(twobyte_slice));
assertEquals(["T_AB", "T_AB"], rere.exec(twobyte_cons));
+ assertEquals("DEFG", ascii_slice.substr(2, 4));
+ assertEquals("DEFG", twobyte_slice.substr(2, 4));
+ assertEquals("DEFG", ascii_cons.substr(3, 4));
+ assertEquals("DEFG", twobyte_cons.substr(4, 4));
}
}
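For context, string-external-cached.js builds its fixtures with d8's externalizeString() helper (available behind --expose-externalize-string), which swaps a heap string's payload for an embedder-style external resource in place; the new assertions then take short substr() slices of those externalized strings. A hedged sketch of the idiom (the second argument forcing a two-byte representation is an assumption about this era's d8):

// Run with: d8 --expose-externalize-string <file>
var s = "ABCDEFGHIJKLMNOPQRST";
try {
  externalizeString(s, true);  // Force a two-byte external string.
} catch (e) { }                // Already external or ineligible: ignore.

var sliced = s.slice(1, -1);   // A slice over the external string.
if (sliced.substr(2, 4) !== "DEFG") {
  throw new Error("substr on a slice of an external string broke");
}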
diff --git a/deps/v8/test/mjsunit/string-slices.js b/deps/v8/test/mjsunit/string-slices.js
index 3eb30f18d..5b1dc360a 100755
--- a/deps/v8/test/mjsunit/string-slices.js
+++ b/deps/v8/test/mjsunit/string-slices.js
@@ -160,6 +160,23 @@ for ( var i = 0; i < 1000; i++) {
f(flat, cons, slice, i);
}
+// Short substrings.
+flat = "abcdefghijkl12345";
+cons = flat + flat.toUpperCase();
+/x/.exec(cons); // Flatten cons
+slice = "abcdefghijklmn12345".slice(1, -1);
+assertEquals("cdefg", flat.substr(2, 5));
+assertEquals("cdefg", cons.substr(2, 5));
+assertEquals("cdefg", slice.substr(1, 5));
+
+flat = "abc\u1234defghijkl12345";
+cons = flat + flat.toUpperCase();
+/x/.exec(cons); // Flatten cons
+slice = "abc\u1234defghijklmn12345".slice(1, -1);
+assertEquals("c\u1234def", flat.substr(2, 5));
+assertEquals("c\u1234def", cons.substr(2, 5));
+assertEquals("c\u1234def", slice.substr(1, 5));
+
// Concatenate substrings.
var ascii = 'abcdefghijklmnop';
var utf = '\u03B1\u03B2\u03B3\u03B4\u03B5\u03B6\u03B7\u03B8\u03B9\u03BA\u03BB';
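These additions cover substr() on each of the three string shapes the engine juggles: a flat sequential string, a cons string built by concatenation (flattened here via /x/.exec(cons), since the regexp engine needs flat input), and a sliced string from slice(). The shapes are internal and not observable from pure JS, so the sketch below only restates how the test manufactures them (the minimum lengths that trigger each representation are an assumption):

var flat = "abcdefghijkl12345";          // sequential (flat) string
var cons = flat + flat.toUpperCase();    // cons string: lazy concatenation
/x/.exec(cons);                          // regexp exec flattens its subject
var slice = (flat + "XY").slice(1, -1);  // sliced string over a flat parent

// Whatever the internal shape, substr() must agree.
if (flat.substr(2, 5) !== "cdefg") throw new Error("flat");
if (cons.substr(2, 5) !== "cdefg") throw new Error("cons");
if (slice.substr(1, 5) !== "cdefg") throw new Error("slice");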
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index e31a630b8..bc096d5ca 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -122,14 +122,14 @@ ecma/Date/15.9.2.2-4: PASS || FAIL
ecma/Date/15.9.2.2-5: PASS || FAIL
ecma/Date/15.9.2.2-6: PASS || FAIL
-# 1026139: These date tests fail on arm
-ecma/Date/15.9.5.29-1: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.34-1: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.28-1: PASS || ($ARM && FAIL)
+# 1026139: These date tests fail on arm and mips
+ecma/Date/15.9.5.29-1: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.34-1: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.28-1: PASS || (($ARM || $MIPS) && FAIL)
-# 1050186: Arm vm is broken; probably unrelated to dates
-ecma/Array/15.4.4.5-3: PASS || ($ARM && FAIL)
-ecma/Date/15.9.5.22-2: PASS || ($ARM && FAIL)
+# 1050186: Arm/MIPS vm is broken; probably unrelated to dates
+ecma/Array/15.4.4.5-3: PASS || (($ARM || $MIPS) && FAIL)
+ecma/Date/15.9.5.22-2: PASS || (($ARM || $MIPS) && FAIL)
# Flaky test that fails due to what appears to be a bug in the test.
# Occurs depending on current time
@@ -863,3 +863,59 @@ ecma/Expressions/11.7.3: SKIP
ecma/Expressions/11.10-3: SKIP
ecma/Expressions/11.7.1: SKIP
ecma_3/RegExp/regress-209067: SKIP
+
+[ $arch == mips ]
+
+# Times out and prints so much output that we need to skip it to avoid
+# hanging the builder.
+js1_5/extensions/regress-342960: SKIP
+
+# BUG(3251229): Times out when running new crankshaft test script.
+ecma_3/RegExp/regress-311414: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma/Date/15.9.5.10-2: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Date/15.9.5.12-2: SKIP
+js1_5/Array/regress-99120-02: SKIP
+js1_5/extensions/regress-371636: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-404755: SKIP
+js1_5/Regress/regress-451322: SKIP
+
+
+# BUG(1040): Allow this test to timeout.
+js1_5/GC/regress-203278-2: PASS || TIMEOUT
+
+
+[ $fast == yes && $arch == mips ]
+
+# In fast mode on MIPS we try to skip all tests that would time out,
+# since running the tests takes so long in the first place.
+
+js1_5/Regress/regress-280769-2: SKIP
+js1_5/Regress/regress-280769-3: SKIP
+js1_5/Regress/regress-244470: SKIP
+js1_5/Regress/regress-203278-1: SKIP
+js1_5/Regress/regress-290575: SKIP
+js1_5/Regress/regress-159334: SKIP
+js1_5/Regress/regress-321971: SKIP
+js1_5/Regress/regress-347306-01: SKIP
+js1_5/Regress/regress-280769-1: SKIP
+js1_5/Regress/regress-280769-5: SKIP
+js1_5/GC/regress-306788: SKIP
+js1_5/GC/regress-278725: SKIP
+js1_5/GC/regress-203278-3: SKIP
+js1_5/GC/regress-311497: SKIP
+js1_5/Array/regress-99120-02: SKIP
+ecma/Date/15.9.5.22-1: SKIP
+ecma/Date/15.9.5.20: SKIP
+ecma/Date/15.9.5.12-2: SKIP
+ecma/Date/15.9.5.8: SKIP
+ecma/Date/15.9.5.9: SKIP
+ecma/Date/15.9.5.11-2: SKIP
+ecma/Expressions/11.7.2: SKIP
+ecma/Expressions/11.10-2: SKIP
+ecma/Expressions/11.7.3: SKIP
+ecma/Expressions/11.10-3: SKIP
+ecma/Expressions/11.7.1: SKIP
+ecma_3/RegExp/regress-209067: SKIP
diff --git a/deps/v8/test/preparser/preparser.status b/deps/v8/test/preparser/preparser.status
index db177782e..6f15fedd8 100644
--- a/deps/v8/test/preparser/preparser.status
+++ b/deps/v8/test/preparser/preparser.status
@@ -31,9 +31,3 @@ prefix preparser
# escapes (we need to parse to distinguish octal escapes from valid
# back-references).
strict-octal-regexp: FAIL
-
-##############################################################################
-[ $arch == mips ]
-
-# Skip all tests on MIPS.
-*: SKIP
diff --git a/deps/v8/test/sputnik/sputnik.status b/deps/v8/test/sputnik/sputnik.status
index fb6d95137..a587a6d4a 100644
--- a/deps/v8/test/sputnik/sputnik.status
+++ b/deps/v8/test/sputnik/sputnik.status
@@ -224,5 +224,14 @@ S15.1.3.2_A2.5_T1: SKIP
[ $arch == mips ]
-# Skip all tests on MIPS.
-*: SKIP
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 2ad574640..c7d363d0d 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -43,190 +43,12 @@ S10.4.2.1_A1: FAIL
S15.3.3.1_A4: FAIL
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1756
-15.2.3.6-4-116: FAIL
-15.2.3.6-4-117: FAIL
-15.2.3.6-4-125: FAIL
-15.2.3.6-4-126: FAIL
-15.2.3.6-4-127: FAIL
-15.2.3.6-4-128: FAIL
-15.2.3.6-4-129: FAIL
-15.2.3.6-4-130: FAIL
-15.2.3.6-4-131: FAIL
-15.2.3.6-4-132: FAIL
-15.2.3.6-4-133: FAIL
-15.2.3.6-4-134: FAIL
-15.2.3.6-4-135: FAIL
-15.2.3.6-4-136: FAIL
-15.2.3.6-4-137: FAIL
-15.2.3.6-4-138: FAIL
-15.2.3.6-4-139: FAIL
-15.2.3.6-4-140: FAIL
-15.2.3.6-4-141: FAIL
-15.2.3.6-4-142: FAIL
-15.2.3.6-4-143: FAIL
-15.2.3.6-4-144: FAIL
-15.2.3.6-4-145: FAIL
-15.2.3.6-4-146: FAIL
-15.2.3.6-4-147: FAIL
-15.2.3.6-4-148: FAIL
-15.2.3.6-4-149: FAIL
-15.2.3.6-4-150: FAIL
-15.2.3.6-4-151: FAIL
-15.2.3.6-4-152: FAIL
-15.2.3.6-4-153: FAIL
-15.2.3.6-4-154: FAIL
-15.2.3.6-4-155: FAIL
-15.2.3.6-4-156: FAIL
-15.2.3.6-4-157: FAIL
-15.2.3.6-4-159: FAIL
-15.2.3.6-4-161: FAIL
-15.2.3.6-4-165: FAIL
-15.2.3.6-4-166: FAIL
-15.2.3.6-4-167: FAIL
-15.2.3.6-4-168: FAIL
-15.2.3.6-4-169: FAIL
-15.2.3.6-4-170: FAIL
-15.2.3.6-4-171: FAIL
-15.2.3.6-4-172: FAIL
-15.2.3.6-4-173: FAIL
-15.2.3.6-4-174: FAIL
-15.2.3.6-4-175: FAIL
-15.2.3.6-4-176: FAIL
-15.2.3.6-4-177: FAIL
-15.2.3.6-4-178: FAIL
-15.2.3.6-4-179-1: FAIL
-15.2.3.6-4-181: FAIL
-15.2.3.6-4-183: FAIL
-15.2.3.6-4-188: FAIL
-15.2.3.6-4-189: FAIL
-15.2.3.6-4-275: FAIL
-15.2.3.6-4-276: FAIL
-15.2.3.6-4-292-1: FAIL
-15.2.3.6-4-293-2: FAIL
-15.2.3.6-4-293-3: FAIL
-15.2.3.6-4-294-1: FAIL
-15.2.3.6-4-295-1: FAIL
-15.2.3.6-4-296-1: FAIL
-15.2.3.6-4-333-11: FAIL
-15.2.3.6-4-360-1: FAIL
-15.2.3.6-4-360-6: FAIL
-15.2.3.6-4-360-7: FAIL
-15.2.3.6-4-405: FAIL
-15.2.3.6-4-410: FAIL
-15.2.3.6-4-415: FAIL
-15.2.3.6-4-420: FAIL
-15.2.3.7-6-a-112: FAIL
-15.2.3.7-6-a-113: FAIL
-15.2.3.7-6-a-122: FAIL
-15.2.3.7-6-a-123: FAIL
-15.2.3.7-6-a-124: FAIL
-15.2.3.7-6-a-125: FAIL
-15.2.3.7-6-a-126: FAIL
-15.2.3.7-6-a-127: FAIL
-15.2.3.7-6-a-128: FAIL
-15.2.3.7-6-a-133: FAIL
-15.2.3.7-6-a-138: FAIL
-15.2.3.7-6-a-139: FAIL
-15.2.3.7-6-a-140: FAIL
-15.2.3.7-6-a-142: FAIL
-15.2.3.7-6-a-143: FAIL
-15.2.3.7-6-a-144: FAIL
-15.2.3.7-6-a-145: FAIL
-15.2.3.7-6-a-147: FAIL
-15.2.3.7-6-a-150: FAIL
-15.2.3.7-6-a-151: FAIL
-15.2.3.7-6-a-155: FAIL
-15.2.3.7-6-a-157: FAIL
-15.2.3.7-6-a-161: FAIL
-15.2.3.7-6-a-162: FAIL
-15.2.3.7-6-a-163: FAIL
-15.2.3.7-6-a-164: FAIL
-15.2.3.7-6-a-165: FAIL
-15.2.3.7-6-a-166: FAIL
-15.2.3.7-6-a-167: FAIL
-15.2.3.7-6-a-168: FAIL
-15.2.3.7-6-a-169: FAIL
-15.2.3.7-6-a-170: FAIL
-15.2.3.7-6-a-171: FAIL
-15.2.3.7-6-a-172: FAIL
-15.2.3.7-6-a-173: FAIL
-15.2.3.7-6-a-174: FAIL
-15.2.3.7-6-a-175: FAIL
-15.2.3.7-6-a-176: FAIL
-15.2.3.7-6-a-177: FAIL
-15.2.3.7-6-a-121: FAIL
-15.2.3.7-6-a-130: FAIL
-15.2.3.7-6-a-129: FAIL
-15.2.3.7-6-a-131: FAIL
-15.2.3.7-6-a-132: FAIL
-15.2.3.7-6-a-136: FAIL
-15.2.3.7-6-a-135: FAIL
-15.2.3.7-6-a-134: FAIL
-15.2.3.7-6-a-137: FAIL
-15.2.3.7-6-a-141: FAIL
-15.2.3.7-6-a-146: FAIL
-15.2.3.7-6-a-148: FAIL
-15.2.3.7-6-a-149: FAIL
-15.2.3.7-6-a-152: FAIL
-15.2.3.7-6-a-153: FAIL
-15.2.3.7-6-a-179: FAIL
-15.2.3.7-6-a-184: FAIL
-15.2.3.7-6-a-185: FAIL
-15.2.3.7-6-a-264: FAIL
-15.2.3.7-6-a-265: FAIL
-15.4.4.14-9-b-i-11: FAIL
-15.4.4.14-9-b-i-13: FAIL
-15.4.4.14-9-b-i-17: FAIL
-15.4.4.14-9-b-i-19: FAIL
-15.4.4.14-9-b-i-28: FAIL
-15.4.4.14-9-b-i-30: FAIL
-15.4.4.15-8-a-14: FAIL
-15.4.4.15-8-b-i-11: FAIL
-15.4.4.15-8-b-i-13: FAIL
-15.4.4.15-8-b-i-17: FAIL
-15.4.4.15-8-b-i-28: FAIL
-15.4.4.15-8-b-i-30: FAIL
-15.4.4.16-7-c-i-10: FAIL
-15.4.4.16-7-c-i-12: FAIL
-15.4.4.16-7-c-i-14: FAIL
-15.4.4.16-7-c-i-18: FAIL
-15.4.4.16-7-c-i-20: FAIL
-15.4.4.16-7-c-i-28: FAIL
-15.4.4.17-7-c-i-10: FAIL
-15.4.4.17-7-c-i-12: FAIL
-15.4.4.17-7-c-i-14: FAIL
-15.4.4.17-7-c-i-18: FAIL
-15.4.4.17-7-c-i-20: FAIL
-15.4.4.17-7-c-i-28: FAIL
-15.4.4.18-7-c-i-10: FAIL
-15.4.4.18-7-c-i-12: FAIL
-15.4.4.18-7-c-i-14: FAIL
-15.4.4.18-7-c-i-18: FAIL
-15.4.4.18-7-c-i-20: FAIL
-15.4.4.18-7-c-i-28: FAIL
-15.4.4.19-8-c-i-10: FAIL
-15.4.4.19-8-c-i-14: FAIL
-15.4.4.19-8-c-i-12: FAIL
-15.4.4.19-8-c-i-18: FAIL
-15.4.4.19-8-c-i-19: FAIL
-15.4.4.19-8-c-i-28: FAIL
-15.4.4.20-9-c-i-10: FAIL
-15.4.4.20-9-c-i-12: FAIL
-15.4.4.20-9-c-i-14: FAIL
-15.4.4.20-9-c-i-18: FAIL
-15.4.4.20-9-c-i-20: FAIL
-15.4.4.20-9-c-i-28: FAIL
-15.4.4.22-8-b-2: FAIL
-15.4.4.22-8-b-iii-1-12: FAIL
-15.4.4.22-8-b-iii-1-18: FAIL
-15.4.4.22-8-b-iii-1-20: FAIL
-15.4.4.22-8-b-iii-1-33: FAIL
-15.4.4.22-8-b-iii-1-30: FAIL
-15.4.4.22-9-b-13: FAIL
-15.4.4.22-9-b-24: FAIL
-15.4.4.22-9-b-26: FAIL
-15.4.4.22-9-b-9: FAIL
-15.4.4.22-9-c-i-30: FAIL
+15.2.3.6-4-167: FAIL || PASS
+15.2.3.6-4-181: FAIL || PASS
+15.2.3.7-6-a-163: FAIL || PASS
+15.2.3.7-6-a-164: FAIL || PASS
+15.2.3.7-6-a-176: FAIL || PASS
+15.2.3.7-6-a-177: FAIL || PASS
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=1772
15.2.3.6-4-292-1: FAIL
@@ -438,5 +260,14 @@ S15.1.3.2_A2.5_T1: SKIP
[ $arch == mips ]
-# Skip all tests on MIPS.
-*: SKIP
+# BUG(3251225): Tests that timeout with --nocrankshaft.
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.1_A2.4_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.4_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
+S15.1.3.3_A2.3_T1: SKIP
+S15.1.3.4_A2.3_T1: SKIP
+S15.1.3.1_A2.5_T1: SKIP
+S15.1.3.2_A2.5_T1: SKIP
diff --git a/deps/v8/tools/test-wrapper-gypbuild.py b/deps/v8/tools/test-wrapper-gypbuild.py
index a990b7ee5..e9984d76c 100755
--- a/deps/v8/tools/test-wrapper-gypbuild.py
+++ b/deps/v8/tools/test-wrapper-gypbuild.py
@@ -146,7 +146,7 @@ def ProcessOptions(options):
print "Unknown mode %s" % mode
return False
for arch in options.arch:
- if not arch in ['ia32', 'x64', 'arm']:
+ if arch not in ['ia32', 'x64', 'arm', 'mips']:
print "Unknown architecture %s" % arch
return False