author    Ben Noordhuis <info@bnoordhuis.nl>  2013-04-17 16:10:37 +0200
committer Ben Noordhuis <info@bnoordhuis.nl>  2013-04-17 16:10:37 +0200
commit    9f682265d6631a29457abeb53827d01fa77493c8 (patch)
tree      92a1eec49b1f280931598a72dcf0cca3d795f210 /deps/v8
parent    951e0b69fa3c8b1a5d708e29de9d6f7d1db79827 (diff)
download  node-9f682265d6631a29457abeb53827d01fa77493c8.tar.gz
deps: upgrade v8 to 3.18.0
Diffstat (limited to 'deps/v8')
-rw-r--r--deps/v8/ChangeLog19
-rw-r--r--deps/v8/Makefile130
-rw-r--r--deps/v8/Makefile.nacl96
-rw-r--r--deps/v8/PRESUBMIT.py2
-rw-r--r--deps/v8/build/common.gypi148
-rw-r--r--deps/v8/build/standalone.gypi4
-rw-r--r--deps/v8/include/v8-preparser.h2
-rw-r--r--deps/v8/include/v8-profiler.h49
-rw-r--r--deps/v8/include/v8.h121
-rw-r--r--deps/v8/preparser/preparser-process.cc6
-rw-r--r--deps/v8/preparser/preparser.gyp17
-rw-r--r--deps/v8/src/accessors.cc58
-rw-r--r--deps/v8/src/accessors.h7
-rw-r--r--deps/v8/src/allocation.cc6
-rw-r--r--deps/v8/src/api.cc237
-rw-r--r--deps/v8/src/api.h4
-rw-r--r--deps/v8/src/arguments.h21
-rw-r--r--deps/v8/src/arm/assembler-arm-inl.h18
-rw-r--r--deps/v8/src/arm/assembler-arm.cc202
-rw-r--r--deps/v8/src/arm/assembler-arm.h19
-rw-r--r--deps/v8/src/arm/code-stubs-arm.cc1112
-rw-r--r--deps/v8/src/arm/code-stubs-arm.h6
-rw-r--r--deps/v8/src/arm/codegen-arm.cc26
-rw-r--r--deps/v8/src/arm/constants-arm.cc2
-rw-r--r--deps/v8/src/arm/constants-arm.h19
-rw-r--r--deps/v8/src/arm/deoptimizer-arm.cc204
-rw-r--r--deps/v8/src/arm/disasm-arm.cc5
-rw-r--r--deps/v8/src/arm/frames-arm.h12
-rw-r--r--deps/v8/src/arm/full-codegen-arm.cc78
-rw-r--r--deps/v8/src/arm/lithium-arm.cc157
-rw-r--r--deps/v8/src/arm/lithium-arm.h148
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.cc551
-rw-r--r--deps/v8/src/arm/lithium-codegen-arm.h14
-rw-r--r--deps/v8/src/arm/lithium-gap-resolver-arm.cc11
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.cc235
-rw-r--r--deps/v8/src/arm/macro-assembler-arm.h43
-rw-r--r--deps/v8/src/arm/simulator-arm.cc211
-rw-r--r--deps/v8/src/arm/simulator-arm.h8
-rw-r--r--deps/v8/src/arm/stub-cache-arm.cc611
-rw-r--r--deps/v8/src/array.js13
-rw-r--r--deps/v8/src/assembler.cc20
-rw-r--r--deps/v8/src/assembler.h4
-rw-r--r--deps/v8/src/ast.cc6
-rw-r--r--deps/v8/src/ast.h19
-rw-r--r--deps/v8/src/atomicops_internals_x86_gcc.cc7
-rw-r--r--deps/v8/src/atomicops_internals_x86_gcc.h2
-rw-r--r--deps/v8/src/bignum.cc9
-rw-r--r--deps/v8/src/bootstrapper.cc120
-rw-r--r--deps/v8/src/builtins-decls.h40
-rw-r--r--deps/v8/src/builtins.cc40
-rw-r--r--deps/v8/src/builtins.h2
-rw-r--r--deps/v8/src/code-stubs-hydrogen.cc72
-rw-r--r--deps/v8/src/code-stubs.cc6
-rw-r--r--deps/v8/src/code-stubs.h56
-rw-r--r--deps/v8/src/collection.js97
-rw-r--r--deps/v8/src/compiler.cc31
-rw-r--r--deps/v8/src/contexts.h18
-rw-r--r--deps/v8/src/conversions-inl.h2
-rw-r--r--deps/v8/src/cpu-profiler.cc4
-rw-r--r--deps/v8/src/cpu-profiler.h1
-rw-r--r--deps/v8/src/d8.cc12
-rw-r--r--deps/v8/src/d8.gyp7
-rw-r--r--deps/v8/src/date.js19
-rw-r--r--deps/v8/src/debug.cc16
-rw-r--r--deps/v8/src/deoptimizer.cc116
-rw-r--r--deps/v8/src/deoptimizer.h47
-rw-r--r--deps/v8/src/disassembler.cc11
-rw-r--r--deps/v8/src/elements.cc4
-rw-r--r--deps/v8/src/execution.cc24
-rw-r--r--deps/v8/src/execution.h5
-rw-r--r--deps/v8/src/factory.cc20
-rw-r--r--deps/v8/src/flag-definitions.h35
-rw-r--r--deps/v8/src/flags.cc29
-rw-r--r--deps/v8/src/frames.h13
-rw-r--r--deps/v8/src/full-codegen.cc34
-rw-r--r--deps/v8/src/full-codegen.h19
-rw-r--r--deps/v8/src/gdb-jit.cc39
-rw-r--r--deps/v8/src/generator.js74
-rw-r--r--deps/v8/src/global-handles.h7
-rw-r--r--deps/v8/src/globals.h12
-rw-r--r--deps/v8/src/handles-inl.h1
-rw-r--r--deps/v8/src/handles.cc27
-rw-r--r--deps/v8/src/handles.h12
-rw-r--r--deps/v8/src/heap-inl.h22
-rw-r--r--deps/v8/src/heap-snapshot-generator.cc2
-rw-r--r--deps/v8/src/heap.cc124
-rw-r--r--deps/v8/src/heap.h60
-rw-r--r--deps/v8/src/hydrogen-instructions.cc97
-rw-r--r--deps/v8/src/hydrogen-instructions.h289
-rw-r--r--deps/v8/src/hydrogen.cc613
-rw-r--r--deps/v8/src/hydrogen.h72
-rw-r--r--deps/v8/src/ia32/assembler-ia32.cc6
-rw-r--r--deps/v8/src/ia32/assembler-ia32.h7
-rw-r--r--deps/v8/src/ia32/builtins-ia32.cc35
-rw-r--r--deps/v8/src/ia32/code-stubs-ia32.cc191
-rw-r--r--deps/v8/src/ia32/codegen-ia32.cc579
-rw-r--r--deps/v8/src/ia32/cpu-ia32.cc2
-rw-r--r--deps/v8/src/ia32/deoptimizer-ia32.cc114
-rw-r--r--deps/v8/src/ia32/frames-ia32.h12
-rw-r--r--deps/v8/src/ia32/full-codegen-ia32.cc34
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.cc961
-rw-r--r--deps/v8/src/ia32/lithium-codegen-ia32.h36
-rw-r--r--deps/v8/src/ia32/lithium-gap-resolver-ia32.cc72
-rw-r--r--deps/v8/src/ia32/lithium-ia32.cc298
-rw-r--r--deps/v8/src/ia32/lithium-ia32.h210
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.cc62
-rw-r--r--deps/v8/src/ia32/macro-assembler-ia32.h5
-rw-r--r--deps/v8/src/ia32/stub-cache-ia32.cc215
-rw-r--r--deps/v8/src/ic.cc14
-rw-r--r--deps/v8/src/isolate.cc71
-rw-r--r--deps/v8/src/isolate.h24
-rw-r--r--deps/v8/src/json-parser.h136
-rw-r--r--deps/v8/src/json-stringifier.h19
-rw-r--r--deps/v8/src/json.js18
-rw-r--r--deps/v8/src/jsregexp.cc3
-rw-r--r--deps/v8/src/list-inl.h3
-rw-r--r--deps/v8/src/lithium-allocator.cc28
-rw-r--r--deps/v8/src/lithium-allocator.h13
-rw-r--r--deps/v8/src/liveedit.cc14
-rw-r--r--deps/v8/src/log-utils.cc5
-rw-r--r--deps/v8/src/log.cc147
-rw-r--r--deps/v8/src/log.h50
-rw-r--r--deps/v8/src/macro-assembler.h25
-rw-r--r--deps/v8/src/mark-compact.cc10
-rw-r--r--deps/v8/src/math.js13
-rw-r--r--deps/v8/src/messages.js4
-rw-r--r--deps/v8/src/mips/assembler-mips-inl.h12
-rw-r--r--deps/v8/src/mips/assembler-mips.cc64
-rw-r--r--deps/v8/src/mips/assembler-mips.h5
-rw-r--r--deps/v8/src/mips/code-stubs-mips.cc1293
-rw-r--r--deps/v8/src/mips/code-stubs-mips.h27
-rw-r--r--deps/v8/src/mips/codegen-mips.cc32
-rw-r--r--deps/v8/src/mips/constants-mips.h5
-rw-r--r--deps/v8/src/mips/deoptimizer-mips.cc159
-rw-r--r--deps/v8/src/mips/disasm-mips.cc4
-rw-r--r--deps/v8/src/mips/frames-mips.h12
-rw-r--r--deps/v8/src/mips/full-codegen-mips.cc75
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.cc496
-rw-r--r--deps/v8/src/mips/lithium-codegen-mips.h14
-rw-r--r--deps/v8/src/mips/lithium-gap-resolver-mips.cc7
-rw-r--r--deps/v8/src/mips/lithium-mips.cc162
-rw-r--r--deps/v8/src/mips/lithium-mips.h142
-rw-r--r--deps/v8/src/mips/macro-assembler-mips.cc92
-rw-r--r--deps/v8/src/mips/simulator-mips.cc154
-rw-r--r--deps/v8/src/mips/simulator-mips.h6
-rw-r--r--deps/v8/src/mips/stub-cache-mips.cc656
-rw-r--r--deps/v8/src/objects-debug.cc39
-rw-r--r--deps/v8/src/objects-inl.h113
-rw-r--r--deps/v8/src/objects-printer.cc20
-rw-r--r--deps/v8/src/objects-visiting.cc2
-rw-r--r--deps/v8/src/objects.cc123
-rw-r--r--deps/v8/src/objects.h114
-rw-r--r--deps/v8/src/parser.cc204
-rw-r--r--deps/v8/src/parser.h83
-rw-r--r--deps/v8/src/platform-cygwin.cc309
-rw-r--r--deps/v8/src/platform-freebsd.cc213
-rw-r--r--deps/v8/src/platform-linux.cc365
-rw-r--r--deps/v8/src/platform-macos.cc217
-rw-r--r--deps/v8/src/platform-nullos.cc42
-rw-r--r--deps/v8/src/platform-openbsd.cc228
-rw-r--r--deps/v8/src/platform-posix.cc48
-rw-r--r--deps/v8/src/platform-solaris.cc206
-rw-r--r--deps/v8/src/platform-win32.cc228
-rw-r--r--deps/v8/src/platform.h239
-rw-r--r--deps/v8/src/preparse-data.cc4
-rw-r--r--deps/v8/src/preparser-api.cc30
-rw-r--r--deps/v8/src/preparser.cc2
-rw-r--r--deps/v8/src/preparser.h71
-rw-r--r--deps/v8/src/prettyprinter.cc2
-rw-r--r--deps/v8/src/profile-generator.cc10
-rw-r--r--deps/v8/src/proxy.js34
-rw-r--r--deps/v8/src/regexp-macro-assembler-irregexp.cc4
-rw-r--r--deps/v8/src/regexp-stack.cc15
-rw-r--r--deps/v8/src/regexp.js8
-rw-r--r--deps/v8/src/runtime-profiler.cc87
-rw-r--r--deps/v8/src/runtime-profiler.h60
-rw-r--r--deps/v8/src/runtime.cc324
-rw-r--r--deps/v8/src/runtime.h19
-rw-r--r--deps/v8/src/sampler.cc694
-rw-r--r--deps/v8/src/sampler.h120
-rw-r--r--deps/v8/src/scanner.h22
-rw-r--r--deps/v8/src/scopes.cc21
-rw-r--r--deps/v8/src/scopes.h10
-rw-r--r--deps/v8/src/serialize.h2
-rw-r--r--deps/v8/src/spaces.cc99
-rw-r--r--deps/v8/src/spaces.h59
-rw-r--r--deps/v8/src/string-stream.cc4
-rw-r--r--deps/v8/src/string.js15
-rw-r--r--deps/v8/src/stub-cache.cc89
-rw-r--r--deps/v8/src/stub-cache.h37
-rw-r--r--deps/v8/src/symbol.js6
-rw-r--r--deps/v8/src/third_party/vtune/ittnotify_config.h484
-rw-r--r--deps/v8/src/third_party/vtune/ittnotify_types.h113
-rw-r--r--deps/v8/src/third_party/vtune/jitprofiling.cc499
-rw-r--r--deps/v8/src/third_party/vtune/jitprofiling.h298
-rw-r--r--deps/v8/src/third_party/vtune/v8-vtune.h69
-rw-r--r--deps/v8/src/third_party/vtune/v8vtune.gyp56
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.cc279
-rw-r--r--deps/v8/src/third_party/vtune/vtune-jit.h82
-rw-r--r--deps/v8/src/typedarray.js100
-rw-r--r--deps/v8/src/unicode-inl.h3
-rw-r--r--deps/v8/src/uri.js9
-rw-r--r--deps/v8/src/utils.cc13
-rw-r--r--deps/v8/src/utils.h10
-rw-r--r--deps/v8/src/v8.cc6
-rw-r--r--deps/v8/src/v8conversions.h13
-rw-r--r--deps/v8/src/v8globals.h14
-rw-r--r--deps/v8/src/v8natives.js110
-rw-r--r--deps/v8/src/v8utils.cc6
-rw-r--r--deps/v8/src/v8utils.h120
-rw-r--r--deps/v8/src/version.cc4
-rw-r--r--deps/v8/src/x64/assembler-x64.cc29
-rw-r--r--deps/v8/src/x64/assembler-x64.h10
-rw-r--r--deps/v8/src/x64/builtins-x64.cc29
-rw-r--r--deps/v8/src/x64/code-stubs-x64.cc226
-rw-r--r--deps/v8/src/x64/deoptimizer-x64.cc103
-rw-r--r--deps/v8/src/x64/frames-x64.h12
-rw-r--r--deps/v8/src/x64/full-codegen-x64.cc34
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.cc266
-rw-r--r--deps/v8/src/x64/lithium-codegen-x64.h14
-rw-r--r--deps/v8/src/x64/lithium-x64.cc136
-rw-r--r--deps/v8/src/x64/lithium-x64.h138
-rw-r--r--deps/v8/src/x64/macro-assembler-x64.cc2
-rw-r--r--deps/v8/src/x64/stub-cache-x64.cc188
-rw-r--r--deps/v8/test/cctest/cctest.cc22
-rw-r--r--deps/v8/test/cctest/cctest.gyp1
-rw-r--r--deps/v8/test/cctest/cctest.h28
-rw-r--r--deps/v8/test/cctest/cctest.status13
-rw-r--r--deps/v8/test/cctest/test-api.cc105
-rw-r--r--deps/v8/test/cctest/test-assembler-arm.cc510
-rw-r--r--deps/v8/test/cctest/test-assembler-ia32.cc54
-rw-r--r--deps/v8/test/cctest/test-assembler-mips.cc623
-rw-r--r--deps/v8/test/cctest/test-assembler-x64.cc18
-rw-r--r--deps/v8/test/cctest/test-compiler.cc74
-rw-r--r--deps/v8/test/cctest/test-cpu-profiler.cc170
-rw-r--r--deps/v8/test/cctest/test-decls.cc2
-rw-r--r--deps/v8/test/cctest/test-disasm-arm.cc11
-rw-r--r--deps/v8/test/cctest/test-disasm-ia32.cc12
-rw-r--r--deps/v8/test/cctest/test-disasm-mips.cc13
-rw-r--r--deps/v8/test/cctest/test-disasm-x64.cc10
-rw-r--r--deps/v8/test/cctest/test-func-name-inference.cc95
-rw-r--r--deps/v8/test/cctest/test-global-handles.cc198
-rw-r--r--deps/v8/test/cctest/test-heap-profiler.cc37
-rw-r--r--deps/v8/test/cctest/test-heap.cc231
-rw-r--r--deps/v8/test/cctest/test-log-stack-tracer.cc50
-rw-r--r--deps/v8/test/cctest/test-log.cc49
-rw-r--r--deps/v8/test/cctest/test-mark-compact.cc35
-rw-r--r--deps/v8/test/cctest/test-parsing.cc120
-rw-r--r--deps/v8/test/cctest/test-platform-win32.cc1
-rw-r--r--deps/v8/test/cctest/test-profile-generator.cc3
-rw-r--r--deps/v8/test/cctest/test-strings.cc64
-rw-r--r--deps/v8/test/cctest/test-symbols.cc13
-rw-r--r--deps/v8/test/cctest/test-utils.cc97
-rw-r--r--deps/v8/test/cctest/testcfg.py4
-rw-r--r--deps/v8/test/message/testcfg.py8
-rw-r--r--deps/v8/test/mjsunit/allocation-site-info.js8
-rw-r--r--deps/v8/test/mjsunit/bugs/bug-2615.js126
-rw-r--r--deps/v8/test/mjsunit/builtins.js4
-rw-r--r--deps/v8/test/mjsunit/external-array-no-sse2.js716
-rw-r--r--deps/v8/test/mjsunit/harmony/collections.js23
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-objects.js68
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-parsing.js2
-rw-r--r--deps/v8/test/mjsunit/harmony/generators-runtime.js126
-rw-r--r--deps/v8/test/mjsunit/harmony/symbols.js17
-rw-r--r--deps/v8/test/mjsunit/harmony/typedarrays.js101
-rw-r--r--deps/v8/test/mjsunit/mjsunit.status38
-rw-r--r--deps/v8/test/mjsunit/object-define-property.js5
-rwxr-xr-xdeps/v8/test/mjsunit/pixel-array-rounding.js8
-rw-r--r--deps/v8/test/mjsunit/proto-poison.js45
-rw-r--r--deps/v8/test/mjsunit/regress/readonly5.js68
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2273.js103
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2595.js57
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2606.js61
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2612.js76
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2618.js74
-rw-r--r--deps/v8/test/mjsunit/regress/regress-2624.js36
-rw-r--r--deps/v8/test/mjsunit/regress/regress-581.js46
-rw-r--r--deps/v8/test/mjsunit/regress/regress-crbug-229923.js41
-rw-r--r--deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js10
-rw-r--r--deps/v8/tools/gyp/v8.gyp180
-rwxr-xr-xdeps/v8/tools/mingw-generate-makefiles.sh97
-rwxr-xr-xdeps/v8/tools/nacl-run.py151
-rwxr-xr-xdeps/v8/tools/presubmit.py21
-rwxr-xr-xdeps/v8/tools/run-tests.py13
-rw-r--r--deps/v8/tools/testrunner/local/junit_output.py49
-rw-r--r--deps/v8/tools/testrunner/local/progress.py46
-rw-r--r--deps/v8/tools/testrunner/local/statusfile.py2
-rw-r--r--deps/v8/tools/tickprocessor.js21
288 files changed, 15770 insertions, 11359 deletions
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 5bd017fc0..70aa73a43 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,22 @@
+2013-04-17: Version 3.18.0
+
+ Enabled pretenuring of fast literals in high promotion mode.
+
+ Removed preparser library; link preparser executable against full V8.
+
+ Fixed set-up of intrinsic's 'constructor' properties.
+ (Chromium issue 229445)
+
+ ES6 symbols: extended V8 API to support symbols (issue 2158).
+
+ Removed ARM support for VFP2.
+
+ Made __proto__ a real JavaScript accessor property.
+ (issue 1949 and issue 2606)
+
+ Performance and stability improvements on all platforms.
+
+
2013-04-04: Version 3.17.16
Stack trace API: poison stack frames below the first strict mode frame.
diff --git a/deps/v8/Makefile b/deps/v8/Makefile
index 8e550d012..a46b333f1 100644
--- a/deps/v8/Makefile
+++ b/deps/v8/Makefile
@@ -36,6 +36,7 @@ TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/v8
+NACL_SDK_ROOT ?=
# Special build flags. Use them like this: "make library=shared"
@@ -83,22 +84,18 @@ endif
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
endif
-# vfp2=off
-ifeq ($(vfp2), off)
- GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
-else
- GYPFLAGS += -Dv8_can_use_vfp2_instructions=true -Darm_fpu=vfpv2
-endif
-# vfp3=off
-ifeq ($(vfp3), off)
- GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
-else
- GYPFLAGS += -Dv8_can_use_vfp3_instructions=true -Darm_fpu=vfpv3
+# vtunejit=on
+ifeq ($(vtunejit), on)
+ GYPFLAGS += -Dv8_enable_vtunejit=1
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
GYPFLAGS += -Dv8_enable_debugger_support=0
endif
+# unalignedaccess=on
+ifeq ($(unalignedaccess), on)
+ GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
+endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
@@ -119,13 +116,66 @@ endif
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
-# hardfp=on
-ifeq ($(hardfp), on)
- GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
-endif
-# armv7=false
+# arm specific flags.
+# armv7=false/true
ifeq ($(armv7), false)
GYPFLAGS += -Darmv7=0
+else
+ifeq ($(armv7), true)
+ GYPFLAGS += -Darmv7=1
+endif
+endif
+# vfp2=off. Deprecated, use armfpu=
+# vfp3=off. Deprecated, use armfpu=
+ifeq ($(vfp3), off)
+ GYPFLAGS += -Darm_fpu=vfp
+endif
+# hardfp=on/off. Deprecated, use armfloatabi
+ifeq ($(hardfp),on)
+ GYPFLAGS += -Darm_float_abi=hard
+else
+ifeq ($(hardfp),off)
+ GYPFLAGS += -Darm_float_abi=softfp
+endif
+endif
+# armneon=on/off
+ifeq ($(armneon), on)
+ GYPFLAGS += -Darm_neon=1
+endif
+# fpu: armfpu=xxx
+# xxx: vfp, vfpv3-d16, vfpv3, neon.
+ifeq ($(armfpu),)
+ifneq ($(vfp3), off)
+ GYPFLAGS += -Darm_fpu=default
+endif
+else
+ GYPFLAGS += -Darm_fpu=$(armfpu)
+endif
+# float abi: armfloatabi=softfp/hard
+ifeq ($(armfloatabi),)
+ifeq ($(hardfp),)
+ GYPFLAGS += -Darm_float_abi=default
+endif
+else
+ GYPFLAGS += -Darm_float_abi=$(armfloatabi)
+endif
+# armthumb=on/off
+ifeq ($(armthumb), off)
+ GYPFLAGS += -Darm_thumb=0
+else
+ifeq ($(armthumb), on)
+ GYPFLAGS += -Darm_thumb=1
+endif
+endif
+# armtest=on
+# With this flag set, by default v8 will only use features implied
+# by the compiler (no probe). This is done by modifying the default
+# values of enable_armv7, enable_vfp2, enable_vfp3 and enable_32dregs.
+# Modifying these flags when launching v8 will enable the probing for
+# the specified values.
+# When using the simulator, this flag is implied.
+ifeq ($(armtest), on)
+ GYPFLAGS += -Darm_test=on
endif
# ----------------- available targets: --------------------
@@ -136,6 +186,7 @@ endif
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM
+# - "nacl" : cross-compile for Native Client (ia32 and x64)
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@@ -149,19 +200,27 @@ ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
+NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
test/cctest/cctest.gyp tools/gyp/v8.gyp
+# If vtunejit=on, the v8vtune.gyp will be appended.
+ifeq ($(vtunejit), on)
+ GYPFILES += src/third_party/vtune/v8vtune.gyp
+endif
# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
+NACL_BUILDS = $(foreach mode,$(MODES), \
+ $(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
+NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
@@ -169,7 +228,9 @@ ENVFILE = $(OUTDIR)/environment
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
- must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN
+ must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN \
+ $(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
+ must-set-NACL_SDK_ROOT
# Target definitions. "all" is the default.
all: $(MODES)
@@ -213,6 +274,16 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
+$(NACL_ARCHES): $(addprefix $$@.,$(MODES))
+
+$(NACL_BUILDS): $(GYPFILES) $(ENVFILE) \
+ Makefile.nacl must-set-NACL_SDK_ROOT
+ @$(MAKE) -f Makefile.nacl $@ \
+ ARCH="$(basename $@)" \
+ MODE="$(subst .,,$(suffix $@))" \
+ OUTDIR="$(OUTDIR)" \
+ GYPFLAGS="$(GYPFLAGS)"
+
# Test targets.
check: all
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
@@ -244,12 +315,21 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
+$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
+ @tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
+ --arch-and-mode=$(basename $@) \
+ --timeout=600 --nopresubmit \
+ --command-prefix="tools/nacl-run.py"
+
+$(addsuffix .check, $(NACL_ARCHES)): \
+ $(addprefix $$(basename $$@).,$(MODES)).check
+
native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
-$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
+$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@@ -260,7 +340,7 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
-clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean
+clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
@@ -283,6 +363,18 @@ ifndef ANDROID_TOOLCHAIN
endif
endif
+# Note that NACL_SDK_ROOT must be set to point to an appropriate
+# Native Client SDK before using this makefile. You can download
+# an SDK here:
+# https://developers.google.com/native-client/sdk/download
+# The path indicated by NACL_SDK_ROOT will typically end with
+# a folder for a pepper version such as "pepper_25" that should
+# have "tools" and "toolchain" subdirectories.
+must-set-NACL_SDK_ROOT:
+ifndef NACL_SDK_ROOT
+ $(error NACL_SDK_ROOT must be set)
+endif
+
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new
diff --git a/deps/v8/Makefile.nacl b/deps/v8/Makefile.nacl
new file mode 100644
index 000000000..e8fc3d252
--- /dev/null
+++ b/deps/v8/Makefile.nacl
@@ -0,0 +1,96 @@
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Those definitions should be consistent with the main Makefile
+NACL_ARCHES = nacl_ia32 nacl_x64
+MODES = release debug
+
+# Generates all combinations of NACL ARCHES and MODES,
+# e.g. "nacl_ia32.release" or "nacl_x64.release"
+NACL_BUILDS = $(foreach mode,$(MODES), \
+ $(addsuffix .$(mode),$(NACL_ARCHES)))
+
+HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
+ifeq ($(HOST_OS), linux)
+ TOOLCHAIN_DIR = linux_x86_glibc
+else
+ ifeq ($(HOST_OS), mac)
+ TOOLCHAIN_DIR = mac_x86_glibc
+ else
+ $(error Host platform "${HOST_OS}" is not supported)
+ endif
+endif
+
+TOOLCHAIN_PATH = ${NACL_SDK_ROOT}/toolchain
+NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
+
+ifeq ($(ARCH), nacl_ia32)
+ GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
+ TOOLCHAIN_ARCH = x86-4.4
+ NACL_CC = "$(NACL_TOOLCHAIN)/bin/i686-nacl-gcc"
+ NACL_CXX = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
+ NACL_LINK = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
+else
+ ifeq ($(ARCH), nacl_x64)
+ GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
+ TOOLCHAIN_ARCH = x86-4.4
+ NACL_CC = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-gcc"
+ NACL_CXX = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
+ NACL_LINK = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
+ else
+ $(error Target architecture "${ARCH}" is not supported)
+ endif
+endif
+
+ifeq ($(wildcard $(NACL_TOOLCHAIN)),)
+ $(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}")
+endif
+
+# For mksnapshot host generation.
+GYPENV += host_os=${HOST_OS}
+
+NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
+.SECONDEXPANSION:
+# For some reason the $$(basename $$@) expansion didn't work here...
+$(NACL_BUILDS): $(NACL_MAKEFILES)
+ @$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
+ CXX=${NACL_CXX} \
+ LINK=${NACL_LINK} \
+ BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
+ python -c "print raw_input().capitalize()") \
+ builddir="$(shell pwd)/$(OUTDIR)/$@"
+
+# NACL GYP file generation targets.
+$(NACL_MAKEFILES):
+ @GYP_GENERATORS=make \
+ GYP_DEFINES="${GYPENV}" \
+ CC=${NACL_CC} \
+ CXX=${NACL_CXX} \
+ build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
+ -Ibuild/standalone.gypi --depth=. \
+ -S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
diff --git a/deps/v8/PRESUBMIT.py b/deps/v8/PRESUBMIT.py
index 0077be941..7d6620384 100644
--- a/deps/v8/PRESUBMIT.py
+++ b/deps/v8/PRESUBMIT.py
@@ -53,6 +53,7 @@ def _CommonChecks(input_api, output_api):
results = []
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
+ results.extend(_V8PresubmitChecks(input_api, output_api))
return results
@@ -67,5 +68,4 @@ def CheckChangeOnCommit(input_api, output_api):
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
- results.extend(_V8PresubmitChecks(input_api, output_api))
return results
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 38ed4f4c9..0b1f39726 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -35,6 +35,13 @@
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
+ # Native Client builds currently use the V8 ARM JIT and
+ # arm/simulator-arm.cc to defer the significant effort required
+ # for NaCl JIT support. The nacl_target_arch variable provides
+ # the 'true' target arch for places in this file that need it.
+ # TODO(bradchen): get rid of nacl_target_arch when someday
+ # NaCl V8 builds stop using the ARM simulator
+ 'nacl_target_arch%': 'none', # must be set externally
# Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
# generated by V8 to do unaligned memory access, and setting it to 'false'
@@ -44,30 +51,17 @@
# access is allowed for all CPUs.
'v8_can_use_unaligned_accesses%': 'default',
- # Setting 'v8_can_use_vfp2_instructions' to 'true' will enable use of ARM VFP
- # instructions in the V8 generated code. VFP instructions will be enabled
- # both for the snapshot and for the ARM target. Leaving the default value
- # of 'false' will avoid VFP instructions in the snapshot and use CPU feature
- # probing when running on the target.
- 'v8_can_use_vfp2_instructions%': 'false',
- 'v8_can_use_vfp3_instructions%': 'false',
-
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
# ARM target. Leaving the default value of 'false' will avoid the use of
# these registers in the snapshot and use CPU feature probing when running
# on the target.
'v8_can_use_vfp32dregs%': 'false',
+ 'arm_test%': 'off',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
- # Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
- # EABI calling convention where double arguments are passed in VFP
- # registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
- # well when compiling for the ARM target.
- 'v8_use_arm_eabi_hardfloat%': 'false',
-
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
@@ -136,55 +130,105 @@
'defines': [
'V8_TARGET_ARCH_ARM',
],
+ 'variables': {
+ 'armsimulator': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: arm" && echo "no" || echo "yes")',
+ },
'conditions': [
- ['armv7==1', {
- 'defines': [
- 'CAN_USE_ARMV7_INSTRUCTIONS=1',
- ],
- }],
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
],
- }],
- [ 'v8_can_use_unaligned_accesses=="false"', {
+ }, {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
- # NEON implies VFP3 and VFP3 implies VFP2.
- [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
- arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
- 'defines': [
- 'CAN_USE_VFP2_INSTRUCTIONS',
- ],
- }],
- # NEON implies VFP3.
- [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
- arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
- 'defines': [
- 'CAN_USE_VFP3_INSTRUCTIONS',
- ],
- }],
- [ 'v8_use_arm_eabi_hardfloat=="true"', {
- 'defines': [
- 'USE_EABI_HARDFLOAT=1',
- 'CAN_USE_VFP2_INSTRUCTIONS',
- ],
+ ['armsimulator=="no"', {
'target_conditions': [
['_toolset=="target"', {
- 'cflags': ['-mfloat-abi=hard',],
+ 'conditions': [
+ [ 'armv7==1', {
+ 'cflags': ['-march=armv7-a',],
+ }],
+ [ 'armv7==1 or armv7=="default"', {
+ 'conditions': [
+ [ 'arm_neon==1', {
+ 'cflags': ['-mfpu=neon',],
+ },
+ {
+ 'conditions': [
+ [ 'arm_fpu!="default"', {
+ 'cflags': ['-mfpu=<(arm_fpu)',],
+ }],
+ ]
+ }],
+ ]
+ }],
+ [ 'arm_float_abi!="default"', {
+ 'cflags': ['-mfloat-abi=<(arm_float_abi)',],
+ }],
+ [ 'arm_thumb==1', {
+ 'cflags': ['-mthumb',],
+ }],
+ [ 'arm_thumb==0', {
+ 'cflags': ['-marm',],
+ }],
+ ],
}],
],
- }, {
- 'defines': [
- 'USE_EABI_HARDFLOAT=0',
+ 'conditions': [
+ [ 'arm_test=="on"', {
+ 'defines': [
+ 'ARM_TEST',
+ ],
+ }],
],
}],
- [ 'v8_can_use_vfp32dregs=="true"', {
+ ['armsimulator=="yes"', {
'defines': [
- 'CAN_USE_VFP32DREGS',
+ 'ARM_TEST',
],
+ 'conditions': [
+ [ 'armv7==1 or armv7=="default"', {
+ 'defines': [
+ 'CAN_USE_ARMV7_INSTRUCTIONS=1',
+ ],
+ 'conditions': [
+ [ 'arm_fpu=="default"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3-d16"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ ],
+ }],
+ [ 'arm_fpu=="vfpv3"', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
+ [ 'arm_fpu=="neon" or arm_neon==1', {
+ 'defines': [
+ 'CAN_USE_VFP3_INSTRUCTIONS',
+ 'CAN_USE_VFP32DREGS',
+ ],
+ }],
+ ],
+ }],
+ [ 'arm_float_abi=="hard"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=1',
+ ],
+ }],
+ [ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
+ 'defines': [
+ 'USE_EABI_HARDFLOAT=0',
+ ],
+ }],
+ ]
}],
],
}], # v8_target_arch=="arm"
@@ -320,7 +364,8 @@
'clang%': 0,
},
'conditions': [
- ['OS!="android" or clang==1', {
+ ['(OS!="android" or clang==1) and \
+ nacl_target_arch!="nacl_x64"', {
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
}],
@@ -409,6 +454,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
+ 'cflags!': [
+ '-O2',
+ '-Os',
+ ],
+ 'cflags': [
+ '-fdata-sections',
+ '-ffunction-sections',
+ '-O3',
+ ],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
diff --git a/deps/v8/build/standalone.gypi b/deps/v8/build/standalone.gypi
index 749755c7c..dde05757d 100644
--- a/deps/v8/build/standalone.gypi
+++ b/deps/v8/build/standalone.gypi
@@ -76,9 +76,11 @@
}],
],
# Default ARM variable settings.
- 'armv7%': 1,
+ 'armv7%': 'default',
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
+ 'arm_float_abi%': 'default',
+ 'arm_thumb': 'default',
},
'target_defaults': {
'default_configuration': 'Debug',
diff --git a/deps/v8/include/v8-preparser.h b/deps/v8/include/v8-preparser.h
index 389949d20..3e39823d6 100644
--- a/deps/v8/include/v8-preparser.h
+++ b/deps/v8/include/v8-preparser.h
@@ -115,4 +115,6 @@ PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
} // namespace v8.
+#undef V8EXPORT
+
#endif // PREPARSER_H
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 68f377c60..5c5c7a946 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -173,7 +173,7 @@ class V8EXPORT CpuProfiler {
*/
/** Deprecated. Use GetProfileCount instead. */
- static int GetProfilesCount();
+ V8_DEPRECATED(static int GetProfilesCount());
/**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
@@ -181,25 +181,26 @@ class V8EXPORT CpuProfiler {
int GetProfileCount();
/** Deprecated. Use GetCpuProfile instead. */
- static const CpuProfile* GetProfile(
+ V8_DEPRECATED(static const CpuProfile* GetProfile(
int index,
- Handle<Value> security_token = Handle<Value>());
+ Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(
int index,
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use FindProfile instead. */
- static const CpuProfile* FindProfile(
+ V8_DEPRECATED(static const CpuProfile* FindProfile(
unsigned uid,
- Handle<Value> security_token = Handle<Value>());
+ Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by uid. */
const CpuProfile* FindCpuProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use StartCpuProfiling instead. */
- static void StartProfiling(Handle<String> title, bool record_samples = false);
+ V8_DEPRECATED(static void StartProfiling(Handle<String> title,
+ bool record_samples = false));
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
@@ -214,9 +215,9 @@ class V8EXPORT CpuProfiler {
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/** Deprecated. Use StopCpuProfiling instead. */
- static const CpuProfile* StopProfiling(
+ V8_DEPRECATED(static const CpuProfile* StopProfiling(
Handle<String> title,
- Handle<Value> security_token = Handle<Value>());
+ Handle<Value> security_token = Handle<Value>()));
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
@@ -226,7 +227,7 @@ class V8EXPORT CpuProfiler {
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use DeleteAllCpuProfiles instead. */
- static void DeleteAllProfiles();
+ V8_DEPRECATED(static void DeleteAllProfiles());
/**
* Deletes all existing profiles, also cancelling all profiling
* activity. All previously returned pointers to profiles and their
@@ -425,22 +426,23 @@ class V8EXPORT HeapProfiler {
(uint16_t class_id, Handle<Value> wrapper);
/** Deprecated. Use GetSnapshotCount instead. */
- static int GetSnapshotsCount();
+ V8_DEPRECATED(static int GetSnapshotsCount());
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
/** Deprecated. Use GetHeapSnapshot instead. */
- static const HeapSnapshot* GetSnapshot(int index);
+ V8_DEPRECATED(static const HeapSnapshot* GetSnapshot(int index));
/** Returns a snapshot by index. */
const HeapSnapshot* GetHeapSnapshot(int index);
/** Deprecated. Use FindHeapSnapshot instead. */
- static const HeapSnapshot* FindSnapshot(unsigned uid);
+ V8_DEPRECATED(static const HeapSnapshot* FindSnapshot(unsigned uid));
/** Returns a profile by uid. */
const HeapSnapshot* FindHeapSnapshot(unsigned uid);
/** Deprecated. Use GetObjectId instead. */
- static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
+ V8_DEPRECATED(static SnapshotObjectId GetSnapshotObjectId(
+ Handle<Value> value));
/**
* Returns SnapshotObjectId for a heap object referenced by |value| if
* it has been seen by the heap profiler, kUnknownObjectId otherwise.
@@ -469,11 +471,11 @@ class V8EXPORT HeapProfiler {
};
/** Deprecated. Use TakeHeapSnapshot instead. */
- static const HeapSnapshot* TakeSnapshot(
+ V8_DEPRECATED(static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL,
- ObjectNameResolver* global_object_name_resolver = NULL);
+ ObjectNameResolver* global_object_name_resolver = NULL));
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
*/
@@ -484,7 +486,7 @@ class V8EXPORT HeapProfiler {
/** Deprecated. Use StartTrackingHeapObjects instead. */
- static void StartHeapObjectsTracking();
+ V8_DEPRECATED(static void StartHeapObjectsTracking());
/**
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
@@ -493,7 +495,8 @@ class V8EXPORT HeapProfiler {
void StartTrackingHeapObjects();
/** Deprecated. Use GetHeapStats instead. */
- static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
+ V8_DEPRECATED(static SnapshotObjectId PushHeapObjectsStats(
+ OutputStream* stream));
/**
* Adds a new time interval entry to the aggregated statistics array. The
* time interval entry contains information on the current heap objects
@@ -509,7 +512,7 @@ class V8EXPORT HeapProfiler {
SnapshotObjectId GetHeapStats(OutputStream* stream);
/** Deprecated. Use StopTrackingHeapObjects instead. */
- static void StopHeapObjectsTracking();
+ V8_DEPRECATED(static void StopHeapObjectsTracking());
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
@@ -518,7 +521,7 @@ class V8EXPORT HeapProfiler {
void StopTrackingHeapObjects();
/** Deprecated. Use DeleteAllHeapSnapshots instead. */
- static void DeleteAllSnapshots();
+ V8_DEPRECATED(static void DeleteAllSnapshots());
/**
* Deletes all snapshots taken. All previously returned pointers to
* snapshots and their contents become invalid after this call.
@@ -526,9 +529,9 @@ class V8EXPORT HeapProfiler {
void DeleteAllHeapSnapshots();
/** Deprecated. Use SetWrapperClassInfoProvider instead. */
- static void DefineWrapperClass(
+ V8_DEPRECATED(static void DefineWrapperClass(
uint16_t class_id,
- WrapperInfoCallback callback);
+ WrapperInfoCallback callback));
/** Binds a callback to embedder's class ID. */
void SetWrapperClassInfoProvider(
uint16_t class_id,
@@ -544,10 +547,10 @@ class V8EXPORT HeapProfiler {
/**
* Deprecated. Returns the number of currently existing persistent handles.
*/
- static int GetPersistentHandleCount();
+ V8_DEPRECATED(static int GetPersistentHandleCount());
/** Deprecated. Use GetHeapProfilerMemorySize instead. */
- static size_t GetMemorySizeUsedByProfiler();
+ V8_DEPRECATED(static size_t GetMemorySizeUsedByProfiler());
/** Returns memory used for profiler internal data and snapshots. */
size_t GetProfilerMemorySize();
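
The hunks above wrap every static profiler entry point in V8_DEPRECATED and point callers at the per-isolate instance methods instead, together with the Isolate::GetCpuProfiler() accessor added in the v8.h diff below. A minimal migration sketch; the function name and title handle are illustrative and not part of the patch, only the profiler calls are the API touched here:

    #include "v8.h"
    #include "v8-profiler.h"

    void ProfileTask(v8::Isolate* isolate, v8::Handle<v8::String> title) {
      // Old, now deprecated: v8::CpuProfiler::StartProfiling(title);
      v8::CpuProfiler* profiler = isolate->GetCpuProfiler();
      profiler->StartCpuProfiling(title, true /* record_samples */);
      // ... run the code being profiled ...
      const v8::CpuProfile* profile = profiler->StopCpuProfiling(title);
      // Old, now deprecated: v8::CpuProfiler::GetProfilesCount();
      int count = profiler->GetProfileCount();
      (void) profile;
      (void) count;
    }
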
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index 9adb1c041..e1c020310 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -127,6 +127,8 @@ class StackFrame;
class StackTrace;
class String;
class StringObject;
+class Symbol;
+class SymbolObject;
class Uint32;
class Utils;
class Value;
@@ -764,6 +766,17 @@ class V8EXPORT Script {
* debugger API.
*/
void SetData(Handle<String> data);
+
+ /**
+ * Returns the name value of one Script.
+ */
+ Handle<Value> GetScriptName();
+
+ /**
+ * Returns zero based line number of the code_pos location in the script.
+ * -1 will be returned if no information available.
+ */
+ int GetLineNumber(int code_pos);
};
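
The two Script methods added above expose the script's name and a position-to-line mapping. A rough usage sketch, assuming `source` is a handle to already-available script text (the Compile call is the pre-existing API; everything else is illustrative):

    v8::Handle<v8::Script> script = v8::Script::Compile(source);
    v8::Handle<v8::Value> name = script->GetScriptName();  // name value of the script
    int line = script->GetLineNumber(42);                  // zero-based line of position 42, or -1
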
@@ -973,6 +986,12 @@ class V8EXPORT Value : public Data {
V8_INLINE(bool IsString() const);
/**
+ * Returns true if this value is a symbol.
+ * This is an experimental feature.
+ */
+ bool IsSymbol() const;
+
+ /**
* Returns true if this value is a function.
*/
bool IsFunction() const;
@@ -1033,6 +1052,12 @@ class V8EXPORT Value : public Data {
bool IsStringObject() const;
/**
+ * Returns true if this value is a Symbol object.
+ * This is an experimental feature.
+ */
+ bool IsSymbolObject() const;
+
+ /**
* Returns true if this value is a NativeError.
*/
bool IsNativeError() const;
@@ -1311,7 +1336,11 @@ class V8EXPORT String : public Primitive {
/** Allocates a new string from 16-bit character codes.*/
static Local<String> New(const uint16_t* data, int length = -1);
- /** Creates a symbol. Returns one if it exists already.*/
+ /**
+ * Creates an internalized string (historically called a "symbol",
+ * not to be confused with ES6 symbols). Returns one if it exists already.
+ * TODO(rossberg): Deprecate me when the new string API is here.
+ */
static Local<String> NewSymbol(const char* data, int length = -1);
/**
@@ -1450,6 +1479,29 @@ class V8EXPORT String : public Primitive {
/**
+ * A JavaScript symbol (ECMA-262 edition 6)
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8EXPORT Symbol : public Primitive {
+ public:
+ // Returns the print name string of the symbol, or undefined if none.
+ Local<Value> Name() const;
+
+ // Create a symbol without a print name.
+ static Local<Symbol> New(Isolate* isolate);
+
+ // Create a symbol with a print name.
+ static Local<Symbol> New(Isolate *isolate, const char* data, int length = -1);
+
+ V8_INLINE(static Symbol* Cast(v8::Value* obj));
+ private:
+ Symbol();
+ static void CheckCast(v8::Value* obj);
+};
+
+
+/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
class V8EXPORT Number : public Primitive {
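
A short sketch of the experimental Symbol API declared above; the isolate and handle-scope setup is assumed boilerplate and not part of this patch:

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::HandleScope scope;
    v8::Local<v8::Symbol> anon  = v8::Symbol::New(isolate);               // no print name
    v8::Local<v8::Symbol> named = v8::Symbol::New(isolate, "my-symbol");  // with print name
    v8::Handle<v8::Value> as_value = named;
    bool is_symbol = as_value->IsSymbol();             // true
    v8::Local<v8::Value> print_name = named->Name();   // "my-symbol"; undefined for anon
    v8::Symbol* back = v8::Symbol::Cast(*as_value);    // checked when V8_ENABLE_CHECKS is set
    (void) anon; (void) is_symbol; (void) print_name; (void) back;
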
@@ -1590,11 +1642,9 @@ class V8EXPORT Object : public Value {
*/
PropertyAttribute GetPropertyAttributes(Handle<Value> key);
- // TODO(1245389): Replace the type-specific versions of these
- // functions with generic ones that accept a Handle<Value> key.
- bool Has(Handle<String> key);
+ bool Has(Handle<Value> key);
- bool Delete(Handle<String> key);
+ bool Delete(Handle<Value> key);
// Delete a property on this object bypassing interceptors and
// ignoring dont-delete attributes.
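
Has() and Delete() above now take an arbitrary Handle<Value> key instead of only Handle<String>, so non-string keys (including the new symbols) go through the generic property path. Illustrative only, with `obj` assumed to be an existing v8::Object handle:

    bool has_name = obj->Has(v8::String::New("name"));  // still works as before
    bool has_zero = obj->Has(v8::Integer::New(0));      // non-string keys now accepted
    bool deleted  = obj->Delete(v8::Integer::New(0));
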
@@ -1982,6 +2032,27 @@ class V8EXPORT StringObject : public Object {
/**
+ * A Symbol object (ECMA-262 edition 6).
+ *
+ * This is an experimental feature. Use at your own risk.
+ */
+class V8EXPORT SymbolObject : public Object {
+ public:
+ static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
+
+ /**
+ * Returns the Symbol held by the object.
+ */
+ Local<Symbol> SymbolValue() const;
+
+ V8_INLINE(static SymbolObject* Cast(v8::Value* obj));
+
+ private:
+ static void CheckCast(v8::Value* obj);
+};
+
+
+/**
* An instance of the built-in RegExp constructor (ECMA-262, 15.10).
*/
class V8EXPORT RegExp : public Object {
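
Continuing the symbol sketch above, SymbolObject wraps a symbol in its object form; `isolate` and `named` are the illustrative names from that sketch:

    v8::Local<v8::Value> wrapped = v8::SymbolObject::New(isolate, named);
    if (wrapped->IsSymbolObject()) {
      v8::SymbolObject* sym_obj = v8::SymbolObject::Cast(*wrapped);
      v8::Local<v8::Symbol> unwrapped = sym_obj->SymbolValue();  // the original symbol
      (void) unwrapped;
    }
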
@@ -3036,6 +3107,9 @@ class V8EXPORT Isolate {
*/
CpuProfiler* GetCpuProfiler();
+ /** Returns the context that is on the top of the stack. */
+ Local<Context> GetCurrentContext();
+
private:
Isolate();
Isolate(const Isolate&);
@@ -3864,11 +3938,11 @@ class V8EXPORT Context {
*/
void ReattachGlobal(Handle<Object> global_object);
- /** Creates a new context.
+ /**
+ * Creates a new context and returns a handle to the newly allocated
+ * context.
*
- * Returns a persistent handle to the newly allocated context. This
- * persistent handle has to be disposed when the context is no
- * longer used so the context can be garbage collected.
+ * \param isolate The isolate in which to create the context.
*
* \param extensions An optional extension configuration containing
* the extensions to be installed in the newly created context.
@@ -3882,6 +3956,14 @@ class V8EXPORT Context {
* template. The state of the global object will be completely reset
* and only object identify will remain.
*/
+ static Local<Context> New(
+ Isolate* isolate,
+ ExtensionConfiguration* extensions = NULL,
+ Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
+ Handle<Value> global_object = Handle<Value>());
+
+ /** Deprecated. Use Isolate version instead. */
+ // TODO(mstarzinger): Put this behind the V8_DEPRECATED guard.
static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
@@ -3890,7 +3972,8 @@ class V8EXPORT Context {
/** Returns the last entered context. */
static Local<Context> GetEntered();
- /** Returns the context that is on the top of the stack. */
+ // TODO(svenpanne) Actually deprecate this.
+ /** Deprecated. Use Isolate::GetCurrentContext instead. */
static Local<Context> GetCurrent();
/**
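
Taken together with the Isolate::GetCurrentContext() accessor added earlier in this diff, the new Context::New(Isolate*, ...) overload replaces the persistent-handle creation pattern. A minimal sketch of the new style; the setup lines are illustrative:

    v8::Isolate* isolate = v8::Isolate::GetCurrent();
    v8::HandleScope handle_scope;
    // New overload: returns a Local<Context> instead of a Persistent<Context>
    // that had to be disposed explicitly.
    v8::Local<v8::Context> context = v8::Context::New(isolate);
    {
      v8::Context::Scope context_scope(context);
      // Preferred over the to-be-deprecated static Context::GetCurrent().
      v8::Local<v8::Context> current = isolate->GetCurrentContext();
      (void) current;
    }
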
@@ -4301,7 +4384,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptyStringRootIndex = 119;
+ static const int kEmptyStringRootIndex = 118;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -4847,6 +4930,14 @@ bool Value::QuickIsString() const {
}
+Symbol* Symbol::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<Symbol*>(value);
+}
+
+
Number* Number::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -4879,6 +4970,14 @@ StringObject* StringObject::Cast(v8::Value* value) {
}
+SymbolObject* SymbolObject::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+ CheckCast(value);
+#endif
+ return static_cast<SymbolObject*>(value);
+}
+
+
NumberObject* NumberObject::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
diff --git a/deps/v8/preparser/preparser-process.cc b/deps/v8/preparser/preparser-process.cc
index 1bcc80492..b81674430 100644
--- a/deps/v8/preparser/preparser-process.cc
+++ b/deps/v8/preparser/preparser-process.cc
@@ -30,6 +30,7 @@
#include <stdio.h>
#include <string.h>
+#include "../include/v8.h"
#include "../include/v8stdint.h"
#include "../include/v8-preparser.h"
@@ -37,8 +38,7 @@
namespace i = v8::internal;
-// This file is only used for testing the stand-alone preparser
-// library.
+// This file is only used for testing the preparser.
// The first argument must be the path of a JavaScript source file, or
// the flags "-e" and the next argument is then the source of a JavaScript
// program.
@@ -320,6 +320,8 @@ int main(int argc, const char* argv[]) {
ExceptionExpectation expects =
ParseExpectation(argc - arg_index, argv + arg_index);
+ v8::V8::Initialize();
+
ScopedPointer<uint8_t> buffer;
size_t length;
diff --git a/deps/v8/preparser/preparser.gyp b/deps/v8/preparser/preparser.gyp
index 0b0338288..863a2ff8a 100644
--- a/deps/v8/preparser/preparser.gyp
+++ b/deps/v8/preparser/preparser.gyp
@@ -31,11 +31,24 @@
{
'target_name': 'preparser',
'type': 'executable',
- 'dependencies': [
- '../tools/gyp/v8.gyp:preparser_lib',
+ 'conditions': [
+ # preparser can't link against a shared library, so link against
+ # the underlying static targets.
+ ['v8_use_snapshot=="true"', {
+ 'dependencies': ['../tools/gyp/v8.gyp:v8_snapshot'],
+ }, {
+ 'dependencies': [
+ '../tools/gyp/v8.gyp:v8_nosnapshot.<(v8_target_arch)',
+ ],
+ }],
+ ],
+ 'include_dirs+': [
+ '../src',
],
'sources': [
'preparser-process.cc',
+ '../include/v8-preparser.h',
+ '../src/preparser-api.cc',
],
},
],
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index 57062be41..0b0f9b075 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -783,64 +783,6 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
-// Accessors::ObjectPrototype
-//
-
-
-static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
- Object* receiver) {
- Object* current = receiver->GetPrototype(isolate);
- while (current->IsJSObject() &&
- JSObject::cast(current)->map()->is_hidden_prototype()) {
- current = current->GetPrototype(isolate);
- }
- return current;
-}
-
-
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver);
-}
-
-
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
- Object* value_raw,
- void*) {
- const bool kSkipHiddenPrototypes = true;
- // To be consistent with other Set functions, return the value.
- if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
- return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
-
- Isolate* isolate = receiver_raw->GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> receiver(receiver_raw);
- Handle<Object> value(value_raw, isolate);
- Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
- isolate);
-
- MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
- isolate);
- if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(receiver, "prototype",
- isolate->factory()->proto_string(),
- old_value);
- }
- return *hresult;
-}
-
-
-const AccessorDescriptor Accessors::ObjectPrototype = {
- ObjectGetPrototype,
- ObjectSetPrototype,
- 0
-};
-
-
-//
// Accessors::MakeModuleExport
//
diff --git a/deps/v8/src/accessors.h b/deps/v8/src/accessors.h
index 250f742fa..0740d92e5 100644
--- a/deps/v8/src/accessors.h
+++ b/deps/v8/src/accessors.h
@@ -56,8 +56,7 @@ namespace internal {
V(ScriptContextData) \
V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \
- V(ScriptEvalFromFunctionName) \
- V(ObjectPrototype)
+ V(ScriptEvalFromFunctionName)
// Accessors contains all predefined proxy accessors.
@@ -111,10 +110,6 @@ class Accessors : public AllStatic {
static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
- static MaybeObject* ObjectGetPrototype(Object* receiver, void*);
- static MaybeObject* ObjectSetPrototype(JSObject* receiver,
- Object* value,
- void*);
// Helper functions.
static Object* FlattenNumber(Object* value);
diff --git a/deps/v8/src/allocation.cc b/deps/v8/src/allocation.cc
index 6c7a08cec..94aaad3fd 100644
--- a/deps/v8/src/allocation.cc
+++ b/deps/v8/src/allocation.cc
@@ -28,8 +28,8 @@
#include "allocation.h"
#include <stdlib.h> // For free, malloc.
-#include <string.h> // For memcpy.
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
@@ -85,7 +85,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
@@ -95,7 +95,7 @@ char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
- memcpy(result, str, length);
+ OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 65663ba5c..dddcd7dcf 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -1655,7 +1655,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(&stream);
}
@@ -1664,10 +1664,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(&stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
- return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
+ return i::PreParserApi::PreParse(&stream);
}
}
@@ -1686,7 +1686,8 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
// Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
- i::OS::MemCopy(deserialized_data, data, length);
+ i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
+ data, static_cast<size_t>(length));
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -1852,6 +1853,34 @@ Local<Value> Script::Id() {
}
+int Script::GetLineNumber(int code_pos) {
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
+ LOG_API(isolate, "Script::GetLineNumber");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsScript()) {
+ i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
+ return i::GetScriptLineNumber(script, code_pos);
+ } else {
+ return -1;
+ }
+}
+
+
+Handle<Value> Script::GetScriptName() {
+ i::Isolate* isolate = i::Isolate::Current();
+ ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
+ LOG_API(isolate, "Script::GetName");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ if (obj->IsScript()) {
+ i::Object* name = i::Script::cast(*obj)->name();
+ return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
+ } else {
+ return Handle<String>();
+ }
+}
+
+
void Script::SetData(v8::Handle<String> data) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::SetData()", return);
@@ -2367,6 +2396,12 @@ bool Value::FullIsString() const {
}
+bool Value::IsSymbol() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsSymbol()")) return false;
+ return Utils::OpenHandle(this)->IsSymbol();
+}
+
+
bool Value::IsArray() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
@@ -2451,6 +2486,16 @@ bool Value::IsStringObject() const {
}
+bool Value::IsSymbolObject() const {
+ // TODO(svenpanne): these and other test functions should be written such
+ // that they do not use Isolate::Current().
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::Value::IsSymbolObject()")) return false;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
+}
+
+
bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
@@ -2664,6 +2709,15 @@ void v8::String::CheckCast(v8::Value* that) {
}
+void v8::Symbol::CheckCast(v8::Value* that) {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsSymbol(),
+ "v8::Symbol::Cast()",
+ "Could not convert to symbol");
+}
+
+
void v8::Number::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2711,6 +2765,16 @@ void v8::StringObject::CheckCast(v8::Value* that) {
}
+void v8::SymbolObject::CheckCast(v8::Value* that) {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::SymbolObject::Cast()")) return;
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
+ "v8::SymbolObject::Cast()",
+ "Could not convert to SymbolObject");
+}
+
+
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
@@ -3079,13 +3143,13 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
- if (!key_obj->IsString()) {
+ if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
- i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
- PropertyAttributes result = self->GetPropertyAttribute(*key_string);
+ i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
+ PropertyAttributes result = self->GetPropertyAttribute(*key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -3215,7 +3279,7 @@ Local<String> v8::Object::ObjectProtoToString() {
// Write prefix.
char* ptr = buf.start();
- memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
// Write real content.
@@ -3223,7 +3287,7 @@ Local<String> v8::Object::ObjectProtoToString() {
ptr += str_len;
// Write postfix.
- memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
+ i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::New(buf.start(), buf_len);
@@ -3255,24 +3319,32 @@ Local<String> v8::Object::GetConstructorName() {
}
-bool v8::Object::Delete(v8::Handle<String> key) {
+bool v8::Object::Delete(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj = i::DeleteProperty(self, key_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return obj->IsTrue();
}
-bool v8::Object::Has(v8::Handle<String> key) {
+bool v8::Object::Has(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Has()", return false);
ENTER_V8(isolate);
- i::Handle<i::JSObject> self = Utils::OpenHandle(this);
- i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
- return self->HasProperty(*key_obj);
+ i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
+ i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
+ EXCEPTION_PREAMBLE(isolate);
+ i::Handle<i::Object> obj = i::HasProperty(self, key_obj);
+ has_pending_exception = obj.is_null();
+ EXCEPTION_BAILOUT_CHECK(isolate, false);
+ return obj->IsTrue();
}
@@ -3352,15 +3424,16 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
return false);
return Utils::OpenHandle(this)->HasRealNamedProperty(
+ isolate,
*Utils::OpenHandle(*key));
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
- ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
- "v8::Object::HasRealIndexedProperty()",
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
return false);
- return Utils::OpenHandle(this)->HasRealElementProperty(index);
+ return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
}
@@ -3371,6 +3444,7 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
return false);
ENTER_V8(isolate);
return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
+ isolate,
*Utils::OpenHandle(*key));
}
@@ -4591,6 +4665,15 @@ const v8::String::ExternalAsciiStringResource*
}
+Local<Value> Symbol::Name() const {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Name()"))
+ return Local<Value>();
+ i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
+ i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
+ return Utils::ToLocal(name);
+}
+
+
double Number::Value() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -4861,18 +4944,14 @@ static i::Handle<i::FunctionTemplateInfo>
}
-Persistent<Context> v8::Context::New(
+static i::Handle<i::Context> CreateEnvironment(
+ i::Isolate* isolate,
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
- i::Isolate::EnsureDefaultIsolate();
- i::Isolate* isolate = i::Isolate::Current();
- EnsureInitializedForIsolate(isolate, "v8::Context::New()");
- LOG_API(isolate, "Context::New");
- ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
+ i::Handle<i::Context> env;
// Enter V8 via an ENTER_V8 scope.
- i::Handle<i::Context> env;
{
ENTER_V8(isolate);
v8::Handle<ObjectTemplate> proxy_template = global_template;
@@ -4927,10 +5006,43 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
- if (env.is_null()) {
- return Persistent<Context>();
- }
- return Persistent<Context>(Utils::ToLocal(env));
+ return env;
+}
+
+
+Persistent<Context> v8::Context::New(
+ v8::ExtensionConfiguration* extensions,
+ v8::Handle<ObjectTemplate> global_template,
+ v8::Handle<Value> global_object) {
+ i::Isolate::EnsureDefaultIsolate();
+ i::Isolate* isolate = i::Isolate::Current();
+ Isolate* external_isolate = reinterpret_cast<Isolate*>(isolate);
+ EnsureInitializedForIsolate(isolate, "v8::Context::New()");
+ LOG_API(isolate, "Context::New");
+ ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
+ i::HandleScope scope(isolate);
+ i::Handle<i::Context> env =
+ CreateEnvironment(isolate, extensions, global_template, global_object);
+ if (env.is_null()) return Persistent<Context>();
+ return Persistent<Context>::New(external_isolate, Utils::ToLocal(env));
+}
+
+
+Local<Context> v8::Context::New(
+ v8::Isolate* external_isolate,
+ v8::ExtensionConfiguration* extensions,
+ v8::Handle<ObjectTemplate> global_template,
+ v8::Handle<Value> global_object) {
+ i::Isolate::EnsureDefaultIsolate();
+ i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
+ EnsureInitializedForIsolate(isolate, "v8::Context::New()");
+ LOG_API(isolate, "Context::New");
+ ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
+ i::HandleScope scope(isolate);
+ i::Handle<i::Context> env =
+ CreateEnvironment(isolate, extensions, global_template, global_object);
+ if (env.is_null()) return Local<Context>();
+ return Utils::ToLocal(scope.CloseAndEscape(env));
}
@@ -5005,10 +5117,7 @@ v8::Local<v8::Context> Context::GetCurrent() {
if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
return Local<Context>();
}
- i::Handle<i::Object> current = isolate->native_context();
- if (current.is_null()) return Local<Context>();
- i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
- return Utils::ToLocal(context);
+ return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
}
@@ -5429,6 +5538,29 @@ Local<v8::String> v8::StringObject::StringValue() const {
}
+Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
+ LOG_API(i_isolate, "SymbolObject::New");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Object> obj =
+ i_isolate->factory()->ToObject(Utils::OpenHandle(*value));
+ return Utils::ToLocal(obj);
+}
+
+
+Local<v8::Symbol> v8::SymbolObject::SymbolValue() const {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::SymbolObject::SymbolValue()"))
+ return Local<v8::Symbol>();
+ LOG_API(isolate, "SymbolObject::SymbolValue");
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
+ return Utils::ToLocal(
+ i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
+}
+
+
Local<v8::Value> v8::Date::New(double time) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Date::New()");
@@ -5610,6 +5742,30 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
}
+Local<Symbol> v8::Symbol::New(Isolate* isolate) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
+ LOG_API(i_isolate, "Symbol::New()");
+ ENTER_V8(i_isolate);
+ i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ return Utils::ToLocal(result);
+}
+
+
+Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
+ i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+ EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
+ LOG_API(i_isolate, "Symbol::New(char)");
+ ENTER_V8(i_isolate);
+ if (length == -1) length = i::StrLength(data);
+ i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
+ i::Vector<const char>(data, length));
+ i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
+ result->set_name(*name);
+ return Utils::ToLocal(result);
+}
+
+
Local<Number> v8::Number::New(double value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Number::New()");
@@ -5813,6 +5969,15 @@ CpuProfiler* Isolate::GetCpuProfiler() {
}
+v8::Local<v8::Context> Isolate::GetCurrentContext() {
+ i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
+ i::Handle<i::Object> current = internal_isolate->native_context();
+ if (current.is_null()) return Local<Context>();
+ i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
+ return Utils::ToLocal(context);
+}
+
+
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
@@ -7086,7 +7251,7 @@ char* HandleScopeImplementer::ArchiveThread(char* storage) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
handle_scope_data_ = *current;
- memcpy(storage, this, sizeof(*this));
+ OS::MemCopy(storage, this, sizeof(*this));
ResetAfterArchive();
current->Initialize();
@@ -7101,7 +7266,7 @@ int HandleScopeImplementer::ArchiveSpacePerThread() {
char* HandleScopeImplementer::RestoreThread(char* storage) {
- memcpy(this, storage, sizeof(*this));
+ OS::MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}
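
Editor's note, not part of the patch: the api.cc hunks above add the embedder-facing Symbol surface (Value::IsSymbol, Symbol::CheckCast, Symbol::Name, Symbol::New, SymbolObject) and an isolate-aware Context::New() overload that returns a Local<Context>. The sketch below shows roughly how an embedder might exercise that surface; the helper name SymbolDemo, the empty template/global arguments, and the pre-isolate-scoped HandleScope form are illustrative assumptions, not code from this commit.

// Sketch only; assumes the v8.h declarations added by this upgrade.
#include <v8.h>

void SymbolDemo(v8::Isolate* isolate) {            // hypothetical helper
  v8::HandleScope handle_scope;                    // pre-3.18-style scope
  // New overload: context creation bound to an explicit isolate,
  // returning a Local<Context> rather than a Persistent<Context>.
  v8::Local<v8::Context> context = v8::Context::New(
      isolate,
      NULL,                                        // no extensions
      v8::Handle<v8::ObjectTemplate>(),            // default global template
      v8::Handle<v8::Value>());                    // default global object
  v8::Context::Scope context_scope(context);

  // Named symbol; length -1 means "take strlen(data)" per Symbol::New above.
  v8::Local<v8::Symbol> sym = v8::Symbol::New(isolate, "id", -1);
  v8::Local<v8::Value> as_value = sym;
  if (as_value->IsSymbol()) {
    v8::Local<v8::Value> name = sym->Name();       // the symbol's name slot
    (void) name;
  }

  // Boxed form, mirroring StringObject/NumberObject.
  v8::Local<v8::Value> boxed = v8::SymbolObject::New(isolate, sym);
  (void) boxed;
}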
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index d73646d99..0cd16f1f0 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -171,6 +171,7 @@ class RegisteredExtension {
V(Object, JSObject) \
V(Array, JSArray) \
V(String, String) \
+ V(Symbol, Symbol) \
V(Script, Object) \
V(Function, JSFunction) \
V(Message, JSObject) \
@@ -196,6 +197,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj);
+ static inline Local<Symbol> ToLocal(
+ v8::internal::Handle<v8::internal::Symbol> obj);
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
@@ -268,6 +271,7 @@ MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, String, String)
+MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
diff --git a/deps/v8/src/arguments.h b/deps/v8/src/arguments.h
index f8fb00c57..1423d5642 100644
--- a/deps/v8/src/arguments.h
+++ b/deps/v8/src/arguments.h
@@ -115,15 +115,18 @@ class CustomArguments : public Relocatable {
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_FUNCTION(Type, Name) \
-Type Name(Arguments args, Isolate* isolate)
-
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
+Type Name(int args_length, Object** args_object, Isolate* isolate)
+
+#define RUNTIME_FUNCTION(Type, Name) \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
+Type Name(int args_length, Object** args_object, Isolate* isolate) { \
+ Arguments args(args_length, args_object); \
+ return __RT_impl_##Name(args, isolate); \
+} \
+static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
+
+#define RUNTIME_ARGUMENTS(isolate, args) \
+ args.length(), args.arguments(), isolate
} } // namespace v8::internal
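
Editor's note, not part of the patch: the rewritten RUNTIME_FUNCTION macro above changes every runtime entry point's external signature to the unwrapped (length, arguments, isolate) triple while the body still receives an Arguments object, and RUNTIME_ARGUMENTS forwards that same triple. As a rough illustration (Runtime_Example and its body are hypothetical), a declaration like RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) now expands along these lines:

// Hypothetical expansion sketch; only the shape matters, not the body.
static MaybeObject* __RT_impl_Runtime_Example(Arguments args, Isolate* isolate);
MaybeObject* Runtime_Example(int args_length, Object** args_object,
                             Isolate* isolate) {
  Arguments args(args_length, args_object);        // re-wrap the raw triple
  return __RT_impl_Runtime_Example(args, isolate);
}
static MaybeObject* __RT_impl_Runtime_Example(Arguments args, Isolate* isolate) {
  // ... function body, still written against Arguments as before ...
  return isolate->heap()->undefined_value();
}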
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index 123013b0a..0f9630b34 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -48,29 +48,17 @@ namespace internal {
int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
- }
+ return kMaxNumAllocatableRegisters;
}
int DwVfpRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
- } else {
- return 1;
- }
+ return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
int DwVfpRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(VFP2)) {
- return NumRegisters() - kNumReservedRegisters;
- } else {
- return 1;
- }
+ return NumRegisters() - kNumReservedRegisters;
}
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 1574d51bb..bc21b6401 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -63,29 +63,21 @@ ExternalReference ExternalReference::cpu_features() {
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
- answer |= 1u << ARMv7;
+ if (FLAG_enable_armv7) {
+ answer |= 1u << ARMv7;
+ }
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
- answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
+ if (FLAG_enable_vfp3) {
+ answer |= 1u << VFP3 | 1u << ARMv7;
+ }
#endif // CAN_USE_VFP3_INSTRUCTIONS
-#ifdef CAN_USE_VFP2_INSTRUCTIONS
- answer |= 1u << VFP2;
-#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
- answer |= 1u << VFP32DREGS;
+ if (FLAG_enable_32dregs) {
+ answer |= 1u << VFP32DREGS;
+ }
#endif // CAN_USE_VFP32DREGS
-
-#ifdef __arm__
- // If the compiler is allowed to use VFP then we can use VFP too in our code
- // generation even when generating snapshots. ARMv7 and hardware floating
- // point support implies VFPv3, see ARM DDI 0406B, page A1-6.
-#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
- && !defined(__SOFTFP__)
- answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
-#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
- // && !defined(__SOFTFP__)
-#endif // _arm__
- if (answer & (1u << ARMv7)) {
+ if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
}
@@ -94,18 +86,13 @@ static unsigned CpuFeaturesImpliedByCompiler() {
const char* DwVfpRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(VFP2)) {
- ASSERT(index >= 0 && index < NumAllocatableRegisters());
- ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
- kNumReservedRegisters - 1);
- if (index >= kDoubleRegZero.code())
- index += kNumReservedRegisters;
-
- return VFPRegisters::Name(index, true);
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
+ ASSERT(index >= 0 && index < NumAllocatableRegisters());
+ ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+ kNumReservedRegisters - 1);
+ if (index >= kDoubleRegZero.code())
+ index += kNumReservedRegisters;
+
+ return VFPRegisters::Name(index, true);
}
@@ -124,6 +111,8 @@ void CpuFeatures::Probe() {
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
+ printf(" ");
+ PrintFeatures();
return;
}
@@ -133,8 +122,7 @@ void CpuFeatures::Probe() {
if (FLAG_enable_vfp3) {
supported_ |=
static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7 |
- static_cast<uint64_t>(1) << VFP2;
+ static_cast<uint64_t>(1) << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
@@ -153,48 +141,127 @@ void CpuFeatures::Probe() {
supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
+ if (FLAG_enable_unaligned_accesses) {
+ supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
+ }
+
#else // __arm__
// Probe for additional features not already known to be available.
- if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
+ if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
- // detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
+ // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << VFP3 |
- static_cast<uint64_t>(1) << ARMv7 |
- static_cast<uint64_t>(1) << VFP2;
- } else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
- found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2;
+ static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
+ if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
- if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
+ if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
- if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
+ if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
+ && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
- OS::ArmCpuHasFeature(ARMv7)) {
+ FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
- if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
+ if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
+ && OS::ArmCpuHasFeature(VFP32DREGS)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
supported_ |= found_by_runtime_probing_only_;
#endif
- // Assert that VFP3 implies VFP2 and ARMv7.
- ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
+ // Assert that VFP3 implies ARMv7.
+ ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
+}
+
+
+void CpuFeatures::PrintTarget() {
+ const char* arm_arch = NULL;
+ const char* arm_test = "";
+ const char* arm_fpu = "";
+ const char* arm_thumb = "";
+ const char* arm_float_abi = NULL;
+
+#if defined CAN_USE_ARMV7_INSTRUCTIONS
+ arm_arch = "arm v7";
+#else
+ arm_arch = "arm v6";
+#endif
+
+#ifdef __arm__
+
+# ifdef ARM_TEST
+ arm_test = " test";
+# endif
+# if defined __ARM_NEON__
+ arm_fpu = " neon";
+# elif defined CAN_USE_VFP3_INSTRUCTIONS
+ arm_fpu = " vfp3";
+# else
+ arm_fpu = " vfp2";
+# endif
+# if (defined __thumb__) || (defined __thumb2__)
+ arm_thumb = " thumb";
+# endif
+ arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
+
+#else // __arm__
+
+ arm_test = " simulator";
+# if defined CAN_USE_VFP3_INSTRUCTIONS
+# if defined CAN_USE_VFP32DREGS
+ arm_fpu = " vfp3";
+# else
+ arm_fpu = " vfp3-d16";
+# endif
+# else
+ arm_fpu = " vfp2";
+# endif
+# if USE_EABI_HARDFLOAT == 1
+ arm_float_abi = "hard";
+# else
+ arm_float_abi = "softfp";
+# endif
+
+#endif // __arm__
+
+ printf("target%s %s%s%s %s\n",
+ arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
+}
+
+
+void CpuFeatures::PrintFeatures() {
+ printf(
+ "ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
+ "MOVW_MOVT_IMMEDIATE_LOADS=%d",
+ CpuFeatures::IsSupported(ARMv7),
+ CpuFeatures::IsSupported(VFP3),
+ CpuFeatures::IsSupported(VFP32DREGS),
+ CpuFeatures::IsSupported(SUDIV),
+ CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
+ CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+#ifdef __arm__
+ bool eabi_hardfloat = OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
+ bool eabi_hardfloat = true;
+#else
+ bool eabi_hardfloat = false;
+#endif
+ printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
@@ -1763,7 +1830,6 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-924.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | offset
- ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1807,7 +1873,6 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
- ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1851,7 +1916,6 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406C.b, A8-1082.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | (offset/4)
- ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1895,7 +1959,6 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
- ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@@ -1938,7 +2001,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1960,7 +2022,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
- ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -1981,7 +2042,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
- ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -2002,7 +2062,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
- ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@@ -2016,7 +2075,7 @@ void Assembler::vstm(BlockAddrMode am,
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@@ -2076,8 +2135,6 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
- ASSERT(IsEnabled(VFP2));
-
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
@@ -2148,7 +2205,6 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
- ASSERT(IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@@ -2163,7 +2219,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-938.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -2181,7 +2236,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-940.
// cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
// Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
ASSERT(index.index == 0 || index.index == 1);
int vd, d;
dst.split_code(&vd, &d);
@@ -2198,7 +2252,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
int vm, m;
dst.split_code(&vm, &m);
@@ -2215,7 +2268,6 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
- ASSERT(IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
int vm, m;
src.split_code(&vm, &m);
@@ -2231,7 +2283,6 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@@ -2246,7 +2297,6 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@@ -2371,7 +2421,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@@ -2380,7 +2429,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@@ -2389,7 +2437,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@@ -2398,7 +2445,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2407,7 +2453,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2416,7 +2461,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@@ -2425,7 +2469,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
- ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@@ -2436,7 +2479,6 @@ void Assembler::vneg(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -2453,7 +2495,6 @@ void Assembler::vabs(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -2472,7 +2513,6 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-830.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -2493,7 +2533,6 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1086.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -2514,7 +2553,6 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-960.
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -2571,7 +2609,6 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-882.
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@@ -2590,7 +2627,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
src1.split_code(&vd, &d);
int vm, m;
@@ -2607,7 +2643,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
ASSERT(src2 == 0.0);
int vd, d;
src1.split_code(&vd, &d);
@@ -2619,7 +2654,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2629,7 +2663,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
- ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@@ -2641,7 +2674,6 @@ void Assembler::vsqrt(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
- ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@@ -2747,9 +2779,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2998,7 +3030,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
const double double_data = rinfo.data64();
uint64_t uint_data = 0;
- memcpy(&uint_data, &double_data, sizeof(double_data));
+ OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
}
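
Editor's note, not part of the patch: with VFP2 dropped as a separately probed feature in the hunks above, Probe() folds the FLAG_enable_* switches into supported_ up front, so IsSupported() no longer re-checks flags and call sites only guard on the features that still vary. A typical guard after this change looks roughly like the following sketch (the surrounding macro-assembler code is illustrative):

// Sketch: VFPv2 is treated as baseline; only optional features such as
// VFPv3 immediate encodings still need a runtime check.
if (CpuFeatures::IsSupported(VFP3)) {
  CpuFeatureScope scope(masm, VFP3);   // records/asserts the feature use
  // ... emit VFPv3-only encodings, e.g. vmov with an encoded double ...
} else {
  // ... fall back to a VFPv2-compatible sequence ...
}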
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 045638e12..0aecbcdd6 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -55,16 +55,15 @@ class CpuFeatures : public AllStatic {
// is enabled (snapshots must be portable).
static void Probe();
+ // Display target use when compiling.
+ static void PrintTarget();
+
+ // Display features.
+ static void PrintFeatures();
+
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- if (f == VFP3 && !FLAG_enable_vfp3) return false;
- if (f == VFP2 && !FLAG_enable_vfp2) return false;
- if (f == SUDIV && !FLAG_enable_sudiv) return false;
- if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
- return false;
- }
- if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
return (supported_ & (1u << f)) != 0;
}
@@ -117,7 +116,6 @@ struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
- static const int kGPRsPerNonVFP2Double = 2;
inline static int NumAllocatableRegisters();
@@ -214,6 +212,7 @@ const Register pc = { kRegister_pc_Code };
// Single word VFP register.
struct SwVfpRegister {
+ static const int kSizeInBytes = 4;
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
@@ -244,6 +243,7 @@ struct DwVfpRegister {
static const int kNumReservedRegisters = 2;
static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
+ static const int kSizeInBytes = 8;
// Note: the number of registers can be different at snapshot and run-time.
// Any code included in the snapshot must be able to run both with 16 or 32
@@ -370,9 +370,6 @@ const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
-const Register sfpd_lo = { kRegister_r6_Code };
-const Register sfpd_hi = { kRegister_r7_Code };
-
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index d982f2706..1db415292 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -38,6 +39,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { r3, r2, r1 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
+
+
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -133,7 +146,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* lhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
@@ -181,9 +193,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->fast_new_closure_total(), 1, r6, r7);
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+ int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -403,153 +413,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // r3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
-
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ Allocate(size, r0, r1, r2, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(r2, Operand(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(r0, allocation_info_start));
- __ str(r3, FieldMemOperand(r0, allocation_info_start + kPointerSize));
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ ldr(r1, FieldMemOperand(r3, i));
- __ str(r1, FieldMemOperand(r0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ add(r2, r0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ add(r2, r0, Operand(JSArray::kSize));
- }
- __ str(r2, FieldMemOperand(r0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(r2, r3, r1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
- __ ldr(r0, MemOperand(sp, 1 * kPointerSize));
- __ add(r3, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
- __ b(eq, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ ldr(r0, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ CompareRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
- __ b(ne, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&check_fast_elements);
- __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex);
- __ b(ne, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(r3);
- __ ldr(r3, FieldMemOperand(r3, JSArray::kElementsOffset));
- __ ldr(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ CompareRoot(r3, expected_map_index);
- __ Assert(eq, message);
- __ pop(r3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ add(sp, sp, Operand(3 * kPointerSize));
- __ Ret();
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -650,30 +513,15 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
- __ vmov(d7.high(), scratch1);
- __ vcvt_f64_s32(d7, d7.high());
- __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
- __ vmov(d6.high(), scratch1);
- __ vcvt_f64_s32(d6, d6.high());
- if (destination == kCoreRegisters) {
- __ vmov(r2, r3, d7);
- __ vmov(r0, r1, d6);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from r0 to r3 and r2 in double format.
- __ mov(scratch1, Operand(r0));
- ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
- __ push(lr);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from r1 to r1 and r0 in double format.
- __ mov(scratch1, Operand(r1));
- ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
+ __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
+ __ vmov(d7.high(), scratch1);
+ __ vcvt_f64_s32(d7, d7.high());
+ __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
+ __ vmov(d6.high(), scratch1);
+ __ vcvt_f64_s32(d6, d6.high());
+ if (destination == kCoreRegisters) {
+ __ vmov(r2, r3, d7);
+ __ vmov(r0, r1, d6);
}
}
@@ -700,9 +548,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(VFP2) &&
- destination == kVFPRegisters) {
- CpuFeatureScope scope(masm, VFP2);
+ if (destination == kVFPRegisters) {
// Load the double from tagged HeapNumber to double register.
__ sub(scratch1, object, Operand(kHeapObjectTag));
__ vldr(dst, scratch1, HeapNumber::kValueOffset);
@@ -715,23 +561,12 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- // Convert smi to double using VFP instructions.
- __ vmov(dst.high(), scratch1);
- __ vcvt_f64_s32(dst, dst.high());
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ vmov(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, Operand(object));
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(lr);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(lr);
+ // Convert smi to double using VFP instructions.
+ __ vmov(dst.high(), scratch1);
+ __ vcvt_f64_s32(dst, dst.high());
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ vmov(dst1, dst2, dst);
}
__ bind(&done);
@@ -778,62 +613,10 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Label done;
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(single_scratch, int_scratch);
- __ vcvt_f64_s32(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ cmp(int_scratch, Operand::Zero());
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ b(eq, &done);
-
- // Preload the sign of the value.
- __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
- // Get the absolute value of the object (as an unsigned integer).
- __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
- __ rsb(dst_mantissa, dst_mantissa, Operand(31));
-
- // Set the exponent.
- __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst_exponent, scratch2, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
-
- __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- // Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
- SetCC);
- __ b(mi, &fewer_than_20_useful_bits);
- // Set the higher 20 bits of the mantissa.
- __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
- __ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
- __ b(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
- __ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst_exponent, dst_exponent, scratch2);
- // Set dst1 to 0.
- __ mov(dst_mantissa, Operand::Zero());
+ __ vmov(single_scratch, int_scratch);
+ __ vcvt_f64_s32(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
__ bind(&done);
}
@@ -872,65 +655,17 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
-
- __ TestDoubleIsInt32(double_dst, double_scratch);
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_dst, scratch1, HeapNumber::kValueOffset);
- if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
- }
-
- } else {
- ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
-
- // Check for 0 and -0.
- Label zero;
- __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst_mantissa));
- __ cmp(scratch1, Operand::Zero());
- __ b(eq, &zero);
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
- __ b(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ b(not_int32);
+ __ TestDoubleIsInt32(double_dst, double_scratch);
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
+ if (destination == kCoreRegisters) {
+ __ vmov(dst_mantissa, dst_exponent, double_dst);
}
-
__ bind(&done);
}
@@ -963,43 +698,13 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
-
- // Load the double value.
- __ sub(scratch1, object, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
+ // Load the double value.
+ __ sub(scratch1, object, Operand(kHeapObjectTag));
+ __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
- // Jump to not_int32 if the operation did not succeed.
- __ b(ne, not_int32);
- } else {
- // Load the double value in the destination registers.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ bic(dst, scratch1, Operand(HeapNumber::kSignMask));
- __ orr(dst, scratch2, Operand(dst));
- __ cmp(dst, Operand::Zero());
- __ b(eq, &done);
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ mov(dst, Operand(dst, LSR, scratch3));
- // Set the implicit first bit.
- __ rsb(scratch3, scratch3, Operand(32));
- __ orr(dst, dst, Operand(scratch2, LSL, scratch3));
- // Set the sign.
- __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ tst(scratch1, Operand(HeapNumber::kSignMask));
- __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
- }
+ __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1);
+ // Jump to not_int32 if the operation did not succeed.
+ __ b(ne, not_int32);
__ b(&done);
__ bind(&maybe_undefined);
@@ -1093,7 +798,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(lr);
__ PrepareCallCFunction(0, 2, scratch);
if (masm->use_eabi_hardfloat()) {
- CpuFeatureScope scope(masm, VFP2);
__ vmov(d0, r0, r1);
__ vmov(d1, r2, r3);
}
@@ -1105,7 +809,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
// Store answer in the overwritable heap number. Double returned in
// registers r0 and r1 or in d0.
if (masm->use_eabi_hardfloat()) {
- CpuFeatureScope scope(masm, VFP2);
__ vstr(d0,
FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@@ -1318,23 +1021,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Lhs is a smi, rhs is a number.
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert lhs to a double in d7.
- CpuFeatureScope scope(masm, VFP2);
- __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
- // Load the double from rhs, tagged HeapNumber r0, to d6.
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- } else {
- __ push(lr);
- // Convert lhs to a double in r2, r3.
- __ mov(r7, Operand(lhs));
- ConvertToDoubleStub stub1(r3, r2, r7, r6);
- __ Call(stub1.GetCode(masm->isolate()));
- // Load rhs to a double in r0, r1.
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ pop(lr);
- }
+ // Convert lhs to a double in d7.
+ __ SmiToDoubleVFPRegister(lhs, d7, r7, s15);
+ // Load the double from rhs, tagged HeapNumber r0, to d6.
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
// We now have both loaded as doubles but we can skip the lhs nan check
// since it's a smi.
@@ -1358,23 +1049,11 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
}
// Rhs is a smi, lhs is a heap number.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- // Load the double from lhs, tagged HeapNumber r1, to d7.
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- // Convert rhs to a double in d6 .
- __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
- } else {
- __ push(lr);
- // Load lhs to a double in r2, r3.
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- // Convert rhs to a double in r0, r1.
- __ mov(r7, Operand(rhs));
- ConvertToDoubleStub stub2(r1, r0, r7, r6);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(lr);
- }
+ // Load the double from lhs, tagged HeapNumber r1, to d7.
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
+ // Convert rhs to a double in d6 .
+ __ SmiToDoubleVFPRegister(rhs, d6, r7, s13);
// Fall through to both_loaded_as_doubles.
}
@@ -1431,60 +1110,6 @@ void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
// See comment at call site.
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
- Condition cond) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- Register rhs_exponent = exp_first ? r0 : r1;
- Register lhs_exponent = exp_first ? r2 : r3;
- Register rhs_mantissa = exp_first ? r1 : r0;
- Register lhs_mantissa = exp_first ? r3 : r2;
-
- // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
- if (cond == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- __ cmp(rhs_mantissa, Operand(lhs_mantissa));
- __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
- // Return non-zero if the numbers are unequal.
- __ Ret(ne);
-
- __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
- // If exponents are equal then return 0.
- __ Ret(eq);
-
- // Exponents are unequal. The only way we can return that the numbers
- // are equal is if one is -0 and the other is 0. We already dealt
- // with the case where both are -0 or both are 0.
- // We start by seeing if the mantissas (that are equal) or the bottom
- // 31 bits of the rhs exponent are non-zero. If so we return not
- // equal.
- __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC);
- __ mov(r0, Operand(r4), LeaveCC, ne);
- __ Ret(ne);
- // Now they are equal if and only if the lhs exponent is zero in its
- // low 31 bits.
- __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize));
- __ Ret();
- } else {
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- __ push(lr);
- __ PrepareCallCFunction(0, 2, r5);
- if (masm->use_eabi_hardfloat()) {
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(d0, r0, r1);
- __ vmov(d1, r2, r3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(pc); // Return.
- }
-}
-
-
-// See comment at call site.
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -1547,16 +1172,10 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- __ sub(r7, rhs, Operand(kHeapObjectTag));
- __ vldr(d6, r7, HeapNumber::kValueOffset);
- __ sub(r7, lhs, Operand(kHeapObjectTag));
- __ vldr(d7, r7, HeapNumber::kValueOffset);
- } else {
- __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- }
+ __ sub(r7, rhs, Operand(kHeapObjectTag));
+ __ vldr(d6, r7, HeapNumber::kValueOffset);
+ __ sub(r7, lhs, Operand(kHeapObjectTag));
+ __ vldr(d7, r7, HeapNumber::kValueOffset);
__ jmp(both_loaded_as_doubles);
}
@@ -1637,42 +1256,37 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ add(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
- __ eor(scratch1, scratch1, Operand(scratch2));
- __ and_(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ add(scratch1,
- number_string_cache,
- Operand(scratch1, LSL, kPointerSizeLog2 + 1));
-
- Register probe = mask;
- __ ldr(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ sub(scratch2, object, Operand(kHeapObjectTag));
- __ vldr(d0, scratch2, HeapNumber::kValueOffset);
- __ sub(probe, probe, Operand(kHeapObjectTag));
- __ vldr(d1, probe, HeapNumber::kValueOffset);
- __ VFPCompareAndSetFlags(d0, d1);
- __ b(ne, not_found); // The cache did not contain this value.
- __ b(&load_result_from_cache);
- } else {
- __ b(not_found);
- }
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ add(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
+ __ eor(scratch1, scratch1, Operand(scratch2));
+ __ and_(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ add(scratch1,
+ number_string_cache,
+ Operand(scratch1, LSL, kPointerSizeLog2 + 1));
+
+ Register probe = mask;
+ __ ldr(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ sub(scratch2, object, Operand(kHeapObjectTag));
+ __ vldr(d0, scratch2, HeapNumber::kValueOffset);
+ __ sub(probe, probe, Operand(kHeapObjectTag));
+ __ vldr(d1, probe, HeapNumber::kValueOffset);
+ __ VFPCompareAndSetFlags(d0, d1);
+ __ b(ne, not_found); // The cache did not contain this value.
+ __ b(&load_result_from_cache);
}
__ bind(&is_smi);
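For readers who do not read ARM assembly fluently, the lookup retained above reduces to hashing the two 32-bit halves of the double and probing a direct-mapped table of (number, string) pairs. A minimal host-side sketch of that logic, assuming a little-endian layout and a simplified Entry that stores the key double inline rather than as a tagged HeapNumber pointer (Entry, NumberStringCacheHash and Probe are illustrative names, not V8 API):

#include <cstdint>
#include <cstring>

struct Entry { double key; const char* value; };  // simplified; the real cache holds tagged pointers

// mask is assumed to be (number of cache entries - 1), a power of two minus one.
uint32_t NumberStringCacheHash(double d, uint32_t mask) {
  uint32_t lo, hi;
  std::memcpy(&lo, &d, 4);                               // scratch1 in the stub
  std::memcpy(&hi, reinterpret_cast<char*>(&d) + 4, 4);  // scratch2 in the stub
  return (lo ^ hi) & mask;                               // eor + and, as emitted above
}

const char* Probe(const Entry* cache, uint32_t mask, double d) {
  const Entry& e = cache[NumberStringCacheHash(d, mask)];
  return e.key == d ? e.value : nullptr;                 // VFP compare; a NaN key never matches
}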
@@ -1787,37 +1401,27 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// The arguments have been converted to doubles and stored in d6 and d7, if
// VFP3 is supported, or in r0, r1, r2, and r3.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(VFP2)) {
- __ bind(&lhs_not_nan);
- CpuFeatureScope scope(masm, VFP2);
- Label no_nan;
- // ARMv7 VFP3 instructions to implement double precision comparison.
- __ VFPCompareAndSetFlags(d7, d6);
- Label nan;
- __ b(vs, &nan);
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
+ __ bind(&lhs_not_nan);
+ Label no_nan;
+ // ARMv7 VFP3 instructions to implement double precision comparison.
+ __ VFPCompareAndSetFlags(d7, d6);
+ Label nan;
+ __ b(vs, &nan);
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
- __ bind(&nan);
- // If one of the sides was a NaN then the v flag is set. Load r0 with
- // whatever it takes to make the comparison fail, since comparisons with NaN
- // always fail.
- if (cc == lt || cc == le) {
- __ mov(r0, Operand(GREATER));
- } else {
- __ mov(r0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // If one of the sides was a NaN then the v flag is set. Load r0 with
+ // whatever it takes to make the comparison fail, since comparisons with NaN
+ // always fail.
+ if (cc == lt || cc == le) {
+ __ mov(r0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc);
- // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
- // answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ __ mov(r0, Operand(LESS));
}
+ __ Ret();
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -1914,7 +1518,6 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
// we cannot call anything that could cause a GC from this stub.
Label patch;
const Register map = r9.is(tos_) ? r7 : r9;
- const Register temp = map;
// undefined -> false.
CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false);
@@ -1957,9 +1560,9 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
if (types_.Contains(STRING)) {
// String value -> false iff empty.
- __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
- __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
- __ Ret(lt); // the string length is OK as the return value
+ __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+ __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt);
+ __ Ret(lt); // the string length is OK as the return value
}
if (types_.Contains(HEAP_NUMBER)) {
@@ -1968,55 +1571,13 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
__ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
__ b(ne, &not_heap_number);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
-
- __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
- __ VFPCompareAndSetFlags(d1, 0.0);
- // "tos_" is a register, and contains a non zero value by default.
- // Hence we only need to overwrite "tos_" with zero to return false for
- // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
- __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
- __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
- } else {
- Label done, not_nan, not_zero;
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- // -0 maps to false:
- __ bic(
- temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC);
- __ b(ne, &not_zero);
- // If exponent word is zero then the answer depends on the mantissa word.
- __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ jmp(&done);
-
- // Check for NaN.
- __ bind(&not_zero);
- // We already zeroed the sign bit, now shift out the mantissa so we only
- // have the exponent left.
- __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord));
- unsigned int shifted_exponent_mask =
- HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord;
- __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32));
- __ b(ne, &not_nan); // If exponent is not 0x7ff then it can't be a NaN.
-
- // Reload exponent word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset));
- __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32));
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- // Load mantissa word.
- __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset));
- __ cmp(temp, Operand::Zero());
- // If mantissa is not zero then we have a NaN, so return 0.
- __ mov(tos_, Operand::Zero(), LeaveCC, ne);
- __ b(ne, &done);
-
- __ bind(&not_nan);
- __ mov(tos_, Operand(1, RelocInfo::NONE32));
- __ bind(&done);
- }
+ __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset));
+ __ VFPCompareAndSetFlags(d1, 0.0);
+  // "tos_" is a register, and contains a non-zero value by default.
+ // Hence we only need to overwrite "tos_" with zero to return false for
+ // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true.
+ __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO
+ __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN
__ Ret();
__ bind(&not_heap_number);
}
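The VFP path kept above encodes the usual ToBoolean rule for heap numbers: only +0, -0 and NaN are falsy. A one-function sketch of the predicate the two conditional moves implement (illustrative C++; HeapNumberToBoolean is not a V8 function):

#include <cmath>

bool HeapNumberToBoolean(double value) {
  // eq after the VFP compare covers +0 and -0, vs covers NaN; everything else stays true.
  return !(value == 0.0 || std::isnan(value));
}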
@@ -2069,7 +1630,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
const Register scratch = r1;
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(scratch);
@@ -2089,8 +1649,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, VFP2);
-
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(scratch);
@@ -2315,19 +1873,10 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm,
__ bind(&heapnumber_allocated);
}
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r1 to the heap number in r0. r2 is corrupted.
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(s0, r1);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(r1, r0, r2);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ __ vmov(s0, r1);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ Ret();
}
@@ -2383,7 +1932,7 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
+ platform_specific_bit_ = true; // VFP2 is a base requirement for V8
}
@@ -2662,7 +2211,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Load left and right operands into d6 and d7 or r0/r1 and r2/r3
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(VFP2) &&
op != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
@@ -2706,7 +2254,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Using VFP registers:
// d6: Left value
// d7: Right value
- CpuFeatureScope scope(masm, VFP2);
switch (op) {
case Token::ADD:
__ vadd(d5, d6, d7);
@@ -2797,11 +2344,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, &result_not_a_smi);
- } else {
- __ b(mi, not_numbers);
- }
+ __ b(mi, &result_not_a_smi);
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
@@ -2837,25 +2380,17 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// result.
__ mov(r0, Operand(r5));
- if (CpuFeatures::IsSupported(VFP2)) {
- // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(s0, r2);
- if (op == Token::SHR) {
- __ vcvt_f64_u32(d0, s0);
- } else {
- __ vcvt_f64_s32(d0, s0);
- }
- __ sub(r3, r0, Operand(kHeapObjectTag));
- __ vstr(d0, r3, HeapNumber::kValueOffset);
- __ Ret();
+ // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
+ // mentioned above SHR needs to always produce a positive result.
+ __ vmov(s0, r2);
+ if (op == Token::SHR) {
+ __ vcvt_f64_u32(d0, s0);
} else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
+ __ vcvt_f64_s32(d0, s0);
}
+ __ sub(r3, r0, Operand(kHeapObjectTag));
+ __ vstr(d0, r3, HeapNumber::kValueOffset);
+ __ Ret();
break;
}
default:
@@ -3001,8 +2536,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD)
+ FloatingPointHelper::Destination destination = (op_ != Token::MOD)
? FloatingPointHelper::kVFPRegisters
: FloatingPointHelper::kCoreRegisters;
@@ -3032,7 +2566,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatureScope scope(masm, VFP2);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -3200,17 +2733,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// We only get a negative result if the shift value (r2) is 0.
    // This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
- // The non vfp2 code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(VFP2)) {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number);
- } else {
- __ b(mi, (result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime);
- }
+ __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number);
break;
case Token::SHL:
__ and_(r2, r2, Operand(0x1f));
@@ -3238,31 +2763,22 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&call_runtime,
mode_);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_s32(double_scratch, double_scratch.low());
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ vmov(double_scratch.low(), r2);
- __ vcvt_f64_u32(double_scratch, double_scratch.low());
- }
-
- // Store the result.
- __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
- __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
- __ mov(r0, heap_number_result);
- __ Ret();
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_s32(double_scratch, double_scratch.low());
} else {
- // Tail call that writes the int32 in r2 to the heap number in r0, using
- // r3 as scratch. r0 is preserved and returned.
- __ mov(r0, r5);
- WriteInt32ToHeapNumberStub stub(r2, r0, r3);
- __ TailCallStub(&stub);
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ vmov(double_scratch.low(), r2);
+ __ vcvt_f64_u32(double_scratch, double_scratch.low());
}
+ // Store the result.
+ __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+ __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+ __ mov(r0, heap_number_result);
+ __ Ret();
+
break;
}
@@ -3441,100 +2957,96 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = r0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- if (tagged) {
- // Argument is a number and is on stack and in r0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(r0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into r2, r3.
- __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
- __ b(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(r0,
- r1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Load it to a double register and store the
- // low and high words into r2, r3.
- __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
- __ vmov(r2, r3, d0);
- } else {
- // Input is untagged double in d2. Output goes to d2.
- __ vmov(r2, r3, d2);
- }
- __ bind(&loaded);
- // r2 = low 32 bits of double value
- // r3 = high 32 bits of double value
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ eor(r1, r2, Operand(r3));
- __ eor(r1, r1, Operand(r1, ASR, 16));
- __ eor(r1, r1, Operand(r1, ASR, 8));
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // r2 = low 32 bits of double value.
- // r3 = high 32 bits of double value.
- // r1 = TranscendentalCache::hash(double value).
- Isolate* isolate = masm->isolate();
- ExternalReference cache_array =
- ExternalReference::transcendental_cache_array_address(isolate);
- __ mov(cache_entry, Operand(cache_array));
- // cache_entry points to cache array.
- int cache_array_index
- = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
- __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
- // r0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ cmp(cache_entry, Operand::Zero());
- __ b(eq, &invalid_cache);
+ if (tagged) {
+ // Argument is a number and is on stack and in r0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(r0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into r2, r3.
+ __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
+ __ b(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(r0,
+ r1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Load it to a double register and store the
+ // low and high words into r2, r3.
+ __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset));
+ __ vmov(r2, r3, d0);
+ } else {
+ // Input is untagged double in d2. Output goes to d2.
+ __ vmov(r2, r3, d2);
+ }
+ __ bind(&loaded);
+ // r2 = low 32 bits of double value
+ // r3 = high 32 bits of double value
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ eor(r1, r2, Operand(r3));
+ __ eor(r1, r1, Operand(r1, ASR, 16));
+ __ eor(r1, r1, Operand(r1, ASR, 8));
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+ // r2 = low 32 bits of double value.
+ // r3 = high 32 bits of double value.
+ // r1 = TranscendentalCache::hash(double value).
+ Isolate* isolate = masm->isolate();
+ ExternalReference cache_array =
+ ExternalReference::transcendental_cache_array_address(isolate);
+ __ mov(cache_entry, Operand(cache_array));
+ // cache_entry points to cache array.
+ int cache_array_index
+ = type_ * sizeof(isolate->transcendental_cache()->caches_[0]);
+ __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index));
+ // r0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ cmp(cache_entry, Operand::Zero());
+ __ b(eq, &invalid_cache);
#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+  // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+      CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t values and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
#endif
- // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
- __ add(r1, r1, Operand(r1, LSL, 1));
- __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
- __ cmp(r2, r4);
- __ cmp(r3, r5, eq);
- __ b(ne, &calculate);
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into r0.
- __ pop();
- __ mov(r0, Operand(r6));
- } else {
- // Load result into d2.
- __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(VFP3))
+ // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12].
+ __ add(r1, r1, Operand(r1, LSL, 1));
+ __ add(cache_entry, cache_entry, Operand(r1, LSL, 2));
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit());
+ __ cmp(r2, r4);
+ __ cmp(r3, r5, eq);
+ __ b(ne, &calculate);
+ // Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
+ if (tagged) {
+ // Pop input value from stack and load result into r0.
+ __ pop();
+ __ mov(r0, Operand(r6));
+ } else {
+ // Load result into d2.
+ __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset));
+ }
+ __ Ret();
__ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
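The hash emitted in the hunk above — exclusive-or the two halves of the double, fold twice with arithmetic shifts, mask with the cache size — together with the 12-byte element layout checked under DEBUG, translates to roughly the following host-side C++. This is a sketch under the stated assumptions; TranscendentalHash is an illustrative name, not V8 API:

#include <cstdint>
#include <cstring>

// cache_size is assumed to be a power of two (the ASSERT in the hunk checks this).
uint32_t TranscendentalHash(double v, uint32_t cache_size) {
  uint32_t lo, hi;                                             // r2 / r3 in the stub
  std::memcpy(&lo, &v, 4);
  std::memcpy(&hi, reinterpret_cast<const char*>(&v) + 4, 4);
  uint32_t h = lo ^ hi;                                        // eor r1, r2, r3
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 16);   // arithmetic shifts, as above
  h ^= static_cast<uint32_t>(static_cast<int32_t>(h) >> 8);
  return h & (cache_size - 1);
}

// Each element is 12 bytes (uint32_t in[2] plus a result pointer), so the entry
// address is base + h * 12, which the stub computes as (h + h * 2) scaled by 4.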
@@ -3543,9 +3055,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
ExternalReference(RuntimeFunction(), masm->isolate());
__ TailCallExternalReference(runtime_function, 1, 1);
} else {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatureScope scope(masm, VFP2);
-
Label no_update;
Label skip_cache;
@@ -3605,7 +3114,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm,
Register scratch) {
- ASSERT(masm->IsEnabled(VFP2));
Isolate* isolate = masm->isolate();
__ push(lr);
@@ -3666,7 +3174,6 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatureScope vfp2_scope(masm, VFP2);
const Register base = r1;
const Register exponent = r2;
const Register heapnumbermap = r5;
@@ -3879,14 +3386,13 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub save_doubles(1, mode);
StoreBufferOverflowStub stub(mode);
// These stubs might already be in the snapshot, detect that and don't
@@ -3895,11 +3401,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
Code* save_doubles_code;
if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
save_doubles_code = *save_doubles.GetCode(isolate);
- save_doubles_code->set_is_pregenerated(true);
-
- Code* store_buffer_overflow_code = *stub.GetCode(isolate);
- store_buffer_overflow_code->set_is_pregenerated(true);
}
+ Code* store_buffer_overflow_code;
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
@@ -3989,6 +3497,8 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
masm->Jump(r5);
}
+ __ VFPEnsureFPSCRState(r2);
+
if (always_allocate) {
// It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
// though (contain the result).
@@ -4024,11 +3534,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
- __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ // Retrieve the pending exception.
__ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ ldr(r0, MemOperand(ip));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, r0, ip, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ __ mov(r3, Operand(isolate->factory()->the_hole_value()));
+ __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
__ str(r3, MemOperand(ip));
// Special handling of termination exceptions which are uncatchable
@@ -4146,13 +3663,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee-saved registers (incl. cp and fp), sp, and lr
__ stm(db_w, sp, kCalleeSaved | lr.bit());
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- // Save callee-saved vfp registers.
- __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- // Set up the reserved register for 0.0.
- __ vmov(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved vfp registers.
+ __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
+ // Set up the reserved register for 0.0.
+ __ vmov(kDoubleRegZero, 0.0);
+ __ VFPEnsureFPSCRState(r4);
// Get address of argv, see stm above.
// r0: code entry
@@ -4162,9 +3677,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Set up argv in r4.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(VFP2)) {
- offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
- }
+ offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize;
__ ldr(r4, MemOperand(sp, offset_to_argv));
// Push a frame with special values setup to mark it as an entry frame.
@@ -4300,11 +3813,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
#endif
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- // Restore callee-saved vfp registers.
- __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
- }
+ // Restore callee-saved vfp registers.
+ __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
@@ -4948,7 +4458,7 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX)));
// Copy the JS object part.
- __ CopyFields(r0, r4, r3.bit(), JSObject::kHeaderSize / kPointerSize);
+ __ CopyFields(r0, r4, d0, s0, JSObject::kHeaderSize / kPointerSize);
// Get the length (smi tagged) and set that as an in-object property too.
STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -7009,50 +6519,46 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP2 is unsupported.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(r0, &right_smi);
- __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
- __ b(&left);
- __ bind(&right_smi);
- __ SmiUntag(r2, r0); // Can't clobber r0 yet.
- SwVfpRegister single_scratch = d2.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d1, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(r1, &left_smi);
- __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r1, Operand(kHeapObjectTag));
- __ vldr(d0, r2, HeapNumber::kValueOffset);
- __ b(&done);
- __ bind(&left_smi);
- __ SmiUntag(r2, r1); // Can't clobber r1 yet.
- single_scratch = d3.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d0, single_scratch);
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(r0, &right_smi);
+ __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vldr(d1, r2, HeapNumber::kValueOffset);
+ __ b(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(r2, r0); // Can't clobber r0 yet.
+ SwVfpRegister single_scratch = d2.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d1, single_scratch);
+
+ __ bind(&left);
+ __ JumpIfSmi(r1, &left_smi);
+ __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ sub(r2, r1, Operand(kHeapObjectTag));
+ __ vldr(d0, r2, HeapNumber::kValueOffset);
+ __ b(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(r2, r1); // Can't clobber r1 yet.
+ single_scratch = d3.low();
+ __ vmov(single_scratch, r2);
+ __ vcvt_f64_s32(d0, single_scratch);
- __ bind(&done);
- // Compare operands.
- __ VFPCompareAndSetFlags(d0, d1);
+ __ bind(&done);
+ // Compare operands.
+ __ VFPCompareAndSetFlags(d0, d1);
- // Don't base result on status bits when a NaN is involved.
- __ b(vs, &unordered);
+ // Don't base result on status bits when a NaN is involved.
+ __ b(vs, &unordered);
- // Return a result of -1, 0, or 1, based on status bits.
- __ mov(r0, Operand(EQUAL), LeaveCC, eq);
- __ mov(r0, Operand(LESS), LeaveCC, lt);
- __ mov(r0, Operand(GREATER), LeaveCC, gt);
- __ Ret();
- }
+ // Return a result of -1, 0, or 1, based on status bits.
+ __ mov(r0, Operand(EQUAL), LeaveCC, eq);
+ __ mov(r0, Operand(LESS), LeaveCC, lt);
+ __ mov(r0, Operand(GREATER), LeaveCC, gt);
+ __ Ret();
__ bind(&unordered);
__ bind(&generic_stub);
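Once both operands are in d0 and d1, the inlined comparison above produces -1, 0 or 1 and bails out to the generic stub whenever a NaN makes the result unordered. A sketch of that logic (InlinedNumberCompare is illustrative; the -1/0/1 constants mirror the LESS/EQUAL/GREATER values the comments refer to):

#include <cmath>

enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

// Returns false when the comparison is unordered (NaN involved), mirroring the
// b(vs, &unordered) branch that falls back to the generic stub.
bool InlinedNumberCompare(double lhs, double rhs, CompareResult* result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return false;
  if (lhs == rhs)      *result = EQUAL;    // mov r0, Operand(EQUAL), eq
  else if (lhs < rhs)  *result = LESS;     // mov r0, Operand(LESS), lt
  else                 *result = GREATER;  // mov r0, Operand(GREATER), gt
  return true;
}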
@@ -7343,6 +6849,7 @@ void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
__ Jump(target); // Call the C++ function.
ASSERT_EQ(Assembler::kInstrSize + Assembler::kPcLoadDelta,
masm->SizeOfCodeGeneratedSince(&start));
+ __ VFPEnsureFPSCRState(r2);
}
@@ -7662,11 +7169,6 @@ bool RecordWriteStub::IsPregenerated() {
}
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
@@ -7689,7 +7191,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(VFP2);
+ return true; // VFP2 is a base requirement for V8
}
@@ -7948,16 +7450,14 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
__ StoreNumberToDoubleElements(r0, r3,
// Overwrites all regs after this.
- r5, r6, r7, r9, r2,
+ r5, r9, r6, r7, r2,
&slow_elements);
__ Ret();
}
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(VFP2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 61ecc975f..741ff9ca8 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -61,11 +61,11 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ : save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -471,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
- CpuFeatureScope scope(masm, VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
@@ -489,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
- CpuFeatureScope scope(masm, VFP2);
// Restore all VFP registers except d0.
// TODO(hans): We should probably restore d0 too. And maybe use vldm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index 1c829469c..9d773d4cc 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(VFP2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
- CpuFeatureScope use_vfp(&masm, VFP2);
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
@@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
@@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
- if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
- if (vfp2_supported) {
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(s0, r9);
- __ vcvt_f64_s32(d0, s0);
- __ vstr(d0, r7, 0);
- __ add(r7, r7, Operand(8));
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- r9,
- FloatingPointHelper::kCoreRegisters,
- d0,
- r0,
- r1,
- lr,
- s0);
- __ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
- }
+ __ vmov(s0, r9);
+ __ vcvt_f64_s32(d0, s0);
+ __ vstr(d0, r7, 0);
+ __ add(r7, r7, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
@@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
- if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
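Each loop iteration in the transition code above now takes the VFP path unconditionally: untag the smi, convert it with vcvt_f64_s32, and store eight bytes; holes are written as the hole NaN built from kHoleNanUpper32 and its low-word counterpart. A rough per-element sketch (ConvertSmiElement is illustrative; the low hole word is assumed since it is not visible in this hunk):

#include <cstdint>
#include <cstring>

// hole_upper corresponds to kHoleNanUpper32 (r5 above); hole_lower is the
// assumed low word of the hole NaN bit pattern.
void ConvertSmiElement(int32_t untagged_smi, bool is_hole,
                       double* dst, uint32_t hole_lower, uint32_t hole_upper) {
  if (is_hole) {
    uint64_t hole_bits = (static_cast<uint64_t>(hole_upper) << 32) | hole_lower;
    std::memcpy(dst, &hole_bits, sizeof(*dst));   // store the hole NaN bit pattern
  } else {
    *dst = static_cast<double>(untagged_smi);     // vmov s0, r9 + vcvt_f64_s32 + vstr
  }
}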
diff --git a/deps/v8/src/arm/constants-arm.cc b/deps/v8/src/arm/constants-arm.cc
index cdca1f531..a13048476 100644
--- a/deps/v8/src/arm/constants-arm.cc
+++ b/deps/v8/src/arm/constants-arm.cc
@@ -51,7 +51,7 @@ double Instruction::DoubleImmedVmov() const {
uint64_t imm = high16 << 48;
double d;
- memcpy(&d, &imm, 8);
+ OS::MemCopy(&d, &imm, 8);
return d;
}
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index 004165ac3..747dc5627 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -44,21 +44,25 @@
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
# define CAN_USE_ARMV7_INSTRUCTIONS 1
+#ifndef CAN_USE_VFP3_INSTRUCTIONS
+# define CAN_USE_VFP3_INSTRUCTIONS
+#endif
#endif
-#if defined(__ARM_ARCH_6__) || \
- defined(__ARM_ARCH_6J__) || \
- defined(__ARM_ARCH_6K__) || \
- defined(__ARM_ARCH_6Z__) || \
+#if defined(__ARM_ARCH_6__) || \
+ defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || \
+ defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__) || \
defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
-#if defined(__ARM_ARCH_5T__) || \
- defined(__ARM_ARCH_5TE__) || \
- defined(__ARM_ARCH_5TEJ__) || \
+#if defined(__ARM_ARCH_5__) || \
+ defined(__ARM_ARCH_5T__) || \
+ defined(__ARM_ARCH_5TE__) || \
+ defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
@@ -403,6 +407,7 @@ const uint32_t kVFPOverflowExceptionBit = 1 << 2;
const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
const uint32_t kVFPInexactExceptionBit = 1 << 4;
const uint32_t kVFPFlushToZeroMask = 1 << 24;
+const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
const uint32_t kVFPNConditionFlagBit = 1 << 31;
const uint32_t kVFPZConditionFlagBit = 1 << 30;
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index 9bcc1ac14..25ad85c4b 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -117,45 +117,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // The back edge bookkeeping code matches the pattern:
- //
- // <decrement profiling counter>
- // 2a 00 00 01 bpl ok
- // e5 9f c? ?? ldr ip, [pc, <stack guard address>]
- // e1 2f ff 3c blx ip
- ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
-
- // We patch the code to the following form:
- //
- // <decrement profiling counter>
- // e1 a0 00 00 mov r0, r0 (NOP)
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
- // e1 2f ff 3c blx ip
- // and overwrite the constant containing the
- // address of the stack check stub.
-
- // Replace conditional jump with NOP.
+// The back edge bookkeeping code matches the pattern:
+//
+// <decrement profiling counter>
+// 2a 00 00 01 bpl ok
+// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
+// e1 2f ff 3c blx ip
+// ok-label
+//
+// We patch the code to the following form:
+//
+// <decrement profiling counter>
+// e1 a0 00 00 mov r0, r0 (NOP)
+// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
+// e1 2f ff 3c blx ip
+// ok-label
+
+void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(!InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
-
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
+ // Replace the call address.
+ uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
@@ -163,34 +157,61 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
}
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
+void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ static const int kInstrSize = Assembler::kInstrSize;
+ // Restore the original jump.
+ CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
+ patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ // Restore the original call address.
+ uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
+ 2 * kInstrSize) & 0xfff;
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
+ Memory::uint32_at(interrupt_address_pointer) =
+ reinterpret_cast<uint32_t>(interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
+}
+
+
+#ifdef DEBUG
+bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
- Code* check_code,
+ Code* interrupt_code,
Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
+ static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
- ASSERT(Assembler::IsLdrPcImmediateOffset(
- Assembler::instr_at(pc_after - 2 * kInstrSize)));
- // Replace NOP with conditional jump.
- CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
+ uint32_t interrupt_address_offset =
+ Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
+ Address interrupt_address_pointer = pc_after + interrupt_address_offset;
- // Replace the stack check address in the constant pool
- // with the entry address of the replacement code.
- uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
- 2 * kInstrSize) & 0xfff;
- Address stack_check_address_pointer = pc_after + stack_check_address_offset;
- ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
- Memory::uint32_at(stack_check_address_pointer) =
- reinterpret_cast<uint32_t>(check_code->entry());
-
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 2 * kInstrSize, check_code);
+ if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
+ Memory::uint32_at(interrupt_address_pointer));
+ return true;
+ } else {
+ ASSERT(Assembler::IsLdrPcImmediateOffset(
+ Assembler::instr_at(pc_after - 2 * kInstrSize)));
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
+ Memory::uint32_at(interrupt_address_pointer));
+ return false;
+ }
}
+#endif // DEBUG
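The before/after patterns in the block comment above amount to flipping one instruction and one constant-pool word at each back edge. A simplified model of that toggle, using the encodings quoted in the comments (BackEdgeSite and the two functions are illustrative; the real code goes through CodePatcher and notifies the incremental marker):

#include <cstdint>

struct BackEdgeSite {
  uint32_t* pc_after;            // address just past the "blx ip" call
  uint32_t* constant_pool_slot;  // word the "ldr ip, [pc, #imm]" reads
};

// kBranchBeforeInterrupt (0x5a000004) is "bpl ok"; 0xe1a00000 is "mov r0, r0" (NOP).
void PatchForOnStackReplacement(BackEdgeSite site, uint32_t replacement_entry) {
  site.pc_after[-3] = 0xe1a00000u;            // replace the bpl with a NOP
  *site.constant_pool_slot = replacement_entry;
}

void RevertToInterruptCheck(BackEdgeSite site, uint32_t interrupt_entry) {
  site.pc_after[-3] = 0x5a000004u;            // restore the conditional branch
  *site.constant_pool_slot = interrupt_entry;
}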
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
@@ -594,23 +615,18 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- // Save all allocatable VFP registers before messing with them.
- ASSERT(kDoubleRegZero.code() == 14);
- ASSERT(kScratchDoubleReg.code() == 15);
+ // Save all allocatable VFP registers before messing with them.
+ ASSERT(kDoubleRegZero.code() == 14);
+ ASSERT(kScratchDoubleReg.code() == 15);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
- // Push registers d0-d13, and possibly d16-d31, on the stack.
- // If d16-d31 are not pushed, decrease the stack pointer instead.
- __ vstm(db_w, sp, d16, d31, ne);
- __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
- __ vstm(db_w, sp, d0, d13);
- } else {
- __ sub(sp, sp, Operand(kDoubleRegsSize));
- }
+ // Push registers d0-d13, and possibly d16-d31, on the stack.
+ // If d16-d31 are not pushed, decrease the stack pointer instead.
+ __ vstm(db_w, sp, d16, d31, ne);
+ __ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
+ __ vstm(db_w, sp, d0, d13);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@@ -669,17 +685,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- // Copy VFP registers to
- // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
- int double_regs_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ vldr(d0, sp, src_offset);
- __ vstr(d0, r1, dst_offset);
- }
+ // Copy VFP registers to
+ // double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
+ int double_regs_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ vldr(d0, sp, src_offset);
+ __ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -749,21 +762,18 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- // Check CPU flags for number of registers, setting the Z condition flag.
- __ CheckFor32DRegs(ip);
+ // Check CPU flags for number of registers, setting the Z condition flag.
+ __ CheckFor32DRegs(ip);
- __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
- int src_offset = FrameDescription::double_registers_offset();
- for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
- if (i == kDoubleRegZero.code()) continue;
- if (i == kScratchDoubleReg.code()) continue;
+ __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
+ int src_offset = FrameDescription::double_registers_offset();
+ for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
+ if (i == kDoubleRegZero.code()) continue;
+ if (i == kScratchDoubleReg.code()) continue;
- const DwVfpRegister reg = DwVfpRegister::from_code(i);
- __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
- src_offset += kDoubleSize;
- }
+ const DwVfpRegister reg = DwVfpRegister::from_code(i);
+ __ vldr(reg, r1, src_offset, i < 16 ? al : ne);
+ src_offset += kDoubleSize;
}
// Push state, pc, and continuation from the last output frame.
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index dec62b341..b84d35535 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1561,8 +1561,9 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(
+ f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/arm/frames-arm.h b/deps/v8/src/arm/frames-arm.h
index 30f4057fa..19b29b855 100644
--- a/deps/v8/src/arm/frames-arm.h
+++ b/deps/v8/src/arm/frames-arm.h
@@ -100,18 +100,6 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 6086645db..ba0f14128 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -162,8 +162,6 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- int locals_count = info->scope()->num_stack_slots();
-
info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
@@ -179,6 +177,9 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
@@ -313,7 +314,7 @@ void FullCodeGenerator::Generate() {
EmitReturnSequence();
// Force emit the constant pool, so it doesn't get emitted in the middle
- // of the stack check table.
+ // of the back edge table.
masm()->CheckConstPool(true, false);
}
@@ -350,7 +351,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting stack check code.
+ // Block literal pools whilst emitting back edge code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
@@ -1268,7 +1269,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@@ -1562,7 +1563,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
- __ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
+ __ CopyFields(r0, r5, d0, s0, size / kPointerSize);
context()->Plug(r0);
}
@@ -1727,7 +1728,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
- __ Push(r3, r2, r1);
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
@@ -1738,8 +1738,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
} else if (expr->depth() > 1) {
+ __ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@@ -3024,37 +3027,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
- if (CpuFeatures::IsSupported(VFP2)) {
- __ PrepareCallCFunction(1, r0);
- __ ldr(r0,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatureScope scope(masm(), VFP2);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- // Create this constant using mov/orr to avoid PC relative load.
- __ mov(r1, Operand(0x41000000));
- __ orr(r1, r1, Operand(0x300000));
- // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
- __ vmov(d7, r0, r1);
- // Move 0x4130000000000000 to VFP.
- __ mov(r0, Operand::Zero());
- __ vmov(d8, r0, r1);
- // Subtract and store the result in the heap number.
- __ vsub(d7, d7, d8);
- __ sub(r0, r4, Operand(kHeapObjectTag));
- __ vstr(d7, r0, HeapNumber::kValueOffset);
- __ mov(r0, r4);
- } else {
- __ PrepareCallCFunction(2, r0);
- __ ldr(r1,
- ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
- __ mov(r0, Operand(r4));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
+ __ PrepareCallCFunction(1, r0);
+ __ ldr(r0,
+ ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
+ __ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ // Create this constant using mov/orr to avoid PC relative load.
+ __ mov(r1, Operand(0x41000000));
+ __ orr(r1, r1, Operand(0x300000));
+ // Move 0x41300000xxxxxxxx (x = random bits) to VFP.
+ __ vmov(d7, r0, r1);
+ // Move 0x4130000000000000 to VFP.
+ __ mov(r0, Operand::Zero());
+ __ vmov(d8, r0, r1);
+ // Subtract and store the result in the heap number.
+ __ vsub(d7, d7, d8);
+ __ sub(r0, r4, Operand(kHeapObjectTag));
+ __ vstr(d7, r0, HeapNumber::kValueOffset);
+ __ mov(r0, r4);
context()->Plug(r0);
}
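The constant-building sequence above is the usual exponent trick for turning 32 random bits into a double in [0, 1): plant the bits in the low word of 1.0 * 2^20 and subtract 2^20. A host-side sketch assuming a little-endian double layout, as on ARM (RandomBitsToUnitInterval is an illustrative name):

#include <cstdint>
#include <cstring>

double RandomBitsToUnitInterval(uint32_t random_bits) {
  // High word 0x41300000 with a zero low word is exactly 1.0 * 2^20, so placing
  // the random bits in the low word gives 2^20 + random_bits * 2^-32.
  uint64_t bits = (static_cast<uint64_t>(0x41300000u) << 32) | random_bits;
  double biased;
  std::memcpy(&biased, &bits, sizeof(biased));
  return biased - 1048576.0;  // subtract 1.0 * 2^20, leaving 0.(32 random bits)
}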
@@ -3191,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(VFP2)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(r0);
}
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index f2b65efe8..f6029b514 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -302,17 +302,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1124,47 +1113,101 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathPowHalf) {
- LOperand* input = UseFixedDouble(instr->value(), d2);
- LOperand* temp = FixedTemp(d3);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, d2);
- } else {
- LOperand* input = UseRegister(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(d3);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, d2), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), d2);
+ LOperand* temp = FixedTemp(d3);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineFixedDouble(result, d2);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
@@ -1933,7 +1976,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
+ return AssignEnvironment(result);
}
@@ -2133,16 +2176,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on non-VFP2 requires an extra scratch
- // register. For convenience, just mark the elements register as "UseTemp"
- // so that it can be used as a temp during the float->double conversion
- // after it's no longer needed after the float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(VFP2) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
+ LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -2341,11 +2375,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
}
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 207faf46e..6486cad2b 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -97,7 +97,6 @@ class LCodeGen;
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -134,9 +133,18 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -181,7 +189,6 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf) \
V(ForInPrepareMap) \
@@ -703,9 +710,22 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor: public LTemplateInstruction<1, 1, 0> {
public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -713,11 +733,69 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathAbs(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathSin: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathCos: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
@@ -740,8 +818,32 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
- virtual void PrintDataTo(StringStream* stream);
+
+class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -1310,7 +1412,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -2074,7 +2176,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+ bool NeedsCanonicalization() {
+ if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+ hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+ return false;
+ }
+ return hydrogen()->NeedsCanonicalization();
+ }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -2239,7 +2347,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2355,13 +2463,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -2611,6 +2712,17 @@ class LChunkBuilder BASE_EMBEDDED {
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
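In the header changes above, each new node fixes its operand counts through the LTemplateInstruction<Results, Inputs, Temps> parameters: LMathFloor and LMathSqrt are <1, 1, 0> (one result, one input, no temps), while LMathRound and LMathPowHalf are <1, 1, 1> because they carry a fixed d3 temp. A stripped-down sketch of that storage scheme, with illustrative names only:

    #include <cassert>

    struct LOperand {};  // stand-in for the real operand class

    // Fixed-size operand storage selected by template parameters, analogous to
    // LTemplateInstruction<Results, Inputs, Temps> (only instantiated with
    // non-zero counts in this sketch).
    template <int kInputs, int kTemps>
    struct NodeTemplate {
      LOperand* inputs_[kInputs];
      LOperand* temps_[kTemps];
    };

    struct MathRoundSketch : NodeTemplate<1, 1> {
      MathRoundSketch(LOperand* value, LOperand* temp) {
        inputs_[0] = value;
        temps_[0] = temp;
      }
      LOperand* value() { return inputs_[0]; }
      LOperand* temp() { return temps_[0]; }
    };

    int main() {
      LOperand value, temp;
      MathRoundSketch node(&value, &temp);
      assert(node.value() == &value && node.temp() == &temp);
    }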
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 7bb3535ff..a19015d80 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -113,7 +113,7 @@ void LCodeGen::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
@@ -195,8 +195,7 @@ bool LCodeGen::GeneratePrologue() {
}
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
+ if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
@@ -852,7 +851,9 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
- if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
+ if (FLAG_deopt_every_n_times == 1 &&
+ !info()->IsStub() &&
+ info()->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -1209,8 +1210,6 @@ void LCodeGen::DoModI(LModI* instr) {
Label vfp_modulo, both_positive, right_negative;
- CpuFeatureScope scope(masm(), VFP2);
-
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
@@ -1615,7 +1614,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
- CpuFeatureScope vfp_scope(masm(), VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@@ -1901,7 +1899,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
- CpuFeatureScope scope(masm(), VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@@ -2072,7 +2069,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left_reg = ToDoubleRegister(left);
DwVfpRegister right_reg = ToDoubleRegister(right);
DwVfpRegister result_reg = ToDoubleRegister(instr->result());
@@ -2118,7 +2114,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left = ToDoubleRegister(instr->left());
DwVfpRegister right = ToDoubleRegister(instr->right());
DwVfpRegister result = ToDoubleRegister(instr->result());
@@ -2209,7 +2204,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
- CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
@@ -2301,7 +2295,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatureScope scope(masm(), VFP2);
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -2381,7 +2374,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@@ -2936,8 +2928,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
+ if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@@ -3319,58 +3310,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, kScratchDoubleReg.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ ldr(value, MemOperand(scratch0(), additional_offset));
- __ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
- __ and_(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ teq(scratch0(), Operand(0x00));
- __ b(eq, &exponent_rebiased);
-
- __ teq(scratch0(), Operand(0xff));
- __ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
- __ b(eq, &exponent_rebiased);
-
- // Rebias exponent.
- __ add(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ and_(sfpd_hi, value, Operand(kBinary32SignMask));
- __ orr(sfpd_hi, sfpd_hi,
- Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ orr(sfpd_hi, sfpd_hi,
- Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
- __ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
-
- } else {
- __ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ ldr(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, kScratchDoubleReg.low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
@@ -3444,23 +3388,12 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (!key_is_constant) {
__ add(elements, elements, Operand(key, LSL, shift_size));
}
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- __ add(elements, elements, Operand(base_offset));
- __ vldr(result, elements, 0);
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- __ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
- } else {
- __ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ ldr(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr->environment());
- }
+ __ add(elements, elements, Operand(base_offset));
+ __ vldr(result, elements, 0);
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ DeoptimizeIf(eq, instr->environment());
}
}
@@ -3821,7 +3754,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3887,7 +3820,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ cmp(input, Operand::Zero());
@@ -3901,20 +3834,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), VFP2);
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
@@ -3938,8 +3869,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), VFP2);
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register input_high = scratch0();
@@ -3961,8 +3891,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), VFP2);
+void LCodeGen::DoMathRound(LMathRound* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
@@ -4001,16 +3930,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), VFP2);
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), VFP2);
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister temp = ToDoubleRegister(instr->temp());
@@ -4032,7 +3959,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
- CpuFeatureScope scope(masm(), VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -4065,7 +3991,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -4144,7 +4069,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
@@ -4158,7 +4082,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
@@ -4166,7 +4090,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
@@ -4174,7 +4098,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
@@ -4182,7 +4106,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
@@ -4190,42 +4114,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
@@ -4442,7 +4330,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@@ -4463,7 +4350,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatureScope scope(masm(), VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant
? Operand(constant_key << element_size_shift)
@@ -4513,7 +4399,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@@ -4545,18 +4430,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- Label after_canonicalization;
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ b(vc, &after_canonicalization);
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
-
- __ bind(&after_canonicalization);
+ // Force a canonical NaN.
+ if (masm()->emit_debug_code()) {
+ __ vmrs(ip);
+ __ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ __ Assert(ne, "Default NaN mode not set");
+ }
+ __ VFPCanonicalizeNaN(value);
}
-
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
@@ -4814,7 +4695,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4832,7 +4712,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4894,43 +4773,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register src.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register src,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word));
- masm->orr(hiword, scratch,
- Operand(src, LSR, mantissa_shift_for_hi_word));
- } else {
- masm->mov(loword, Operand::Zero());
- masm->orr(hiword, scratch,
- Operand(src, LSL, -mantissa_shift_for_hi_word));
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
- }
-}
-
-
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
@@ -4952,35 +4794,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_s32(dbl_scratch, flt_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
- sfpd_lo, sfpd_hi,
- scratch0(), s0);
- }
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- __ vmov(flt_scratch, src);
- __ vcvt_f64_u32(dbl_scratch, flt_scratch);
- } else {
- Label no_leading_zero, convert_done;
- __ tst(src, Operand(0x80000000));
- __ b(ne, &no_leading_zero);
-
- // Integer has one leading zeros.
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1);
- __ b(&convert_done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0);
- __ bind(&convert_done);
- }
+ __ vmov(flt_scratch, src);
+ __ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
if (FLAG_inline_new) {
@@ -4996,30 +4814,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
- if (!CpuFeatures::IsSupported(VFP2)) {
- // Preserve sfpd_lo.
- __ mov(r9, sfpd_lo);
- }
__ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
- if (!CpuFeatures::IsSupported(VFP2)) {
- // Restore sfpd_lo.
- __ mov(sfpd_lo, r9);
- }
__ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
+ __ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -5052,45 +4856,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Label no_special_nan_handling;
Label done;
if (convert_hole) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- DwVfpRegister input_reg = ToDoubleRegister(instr->value());
- __ VFPCompareAndSetFlags(input_reg, input_reg);
- __ b(vc, &no_special_nan_handling);
- __ vmov(reg, scratch0(), input_reg);
- __ cmp(scratch0(), Operand(kHoleNanUpper32));
- Label canonicalize;
- __ b(ne, &canonicalize);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&canonicalize);
- __ Vmov(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg);
- } else {
- Label not_hole;
- __ cmp(sfpd_hi, Operand(kHoleNanUpper32));
- __ b(ne, &not_hole);
- __ Move(reg, factory()->the_hole_value());
- __ b(&done);
- __ bind(&not_hole);
- __ and_(scratch, sfpd_hi, Operand(0x7ff00000));
- __ cmp(scratch, Operand(0x7ff00000));
- __ b(ne, &no_special_nan_handling);
- Label special_nan_handling;
- __ tst(sfpd_hi, Operand(0x000FFFFF));
- __ b(ne, &special_nan_handling);
- __ cmp(sfpd_lo, Operand(0));
- __ b(eq, &no_special_nan_handling);
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ mov(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ mov(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
+ DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+ __ VFPCompareAndSetFlags(input_reg, input_reg);
+ __ b(vc, &no_special_nan_handling);
+ __ vmov(scratch, input_reg.high());
+ __ cmp(scratch, Operand(kHoleNanUpper32));
+ // If not the hole NaN, force the NaN to be canonical.
+ __ VFPCanonicalizeNaN(input_reg, ne);
+ __ b(ne, &no_special_nan_handling);
+ __ Move(reg, factory()->the_hole_value());
+ __ b(&done);
}
__ bind(&no_special_nan_handling);
@@ -5104,13 +4879,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm(), VFP2);
- __ vstr(input_reg, reg, HeapNumber::kValueOffset);
- } else {
- __ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
+ __ vstr(input_reg, reg, HeapNumber::kValueOffset);
// Now that we have finished with the object's real address tag it
__ add(reg, reg, Operand(kHeapObjectTag));
__ bind(&done);
@@ -5160,7 +4929,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
- CpuFeatureScope scope(masm(), VFP2);
Label load_smi, heap_number, done;
@@ -5249,7 +5017,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
- CpuFeatureScope scope(masm(), VFP2);
Register scratch3 = ToRegister(instr->temp2());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
@@ -5270,11 +5037,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ sub(scratch1, input_reg, Operand(kHeapObjectTag));
__ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
- __ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch,
- scratch1, scratch2, scratch3);
+ __ ECMAToInt32(input_reg, double_scratch2, double_scratch,
+ scratch1, scratch2, scratch3);
} else {
- CpuFeatureScope scope(masm(), VFP3);
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
@@ -5369,8 +5135,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- __ ECMAToInt32VFP(result_reg, double_input, double_scratch,
- scratch1, scratch2, scratch3);
+ __ ECMAToInt32(result_reg, double_input, double_scratch,
+ scratch1, scratch2, scratch3);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
@@ -5486,7 +5252,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), VFP2);
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
@@ -5495,7 +5260,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatureScope scope(masm(), VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -5503,7 +5267,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatureScope scope(masm(), VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@@ -5541,7 +5304,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@@ -5554,8 +5316,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
- __ LoadHeapObject(prototype_reg,
- prototypes->at(prototypes->length() - 1));
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
@@ -5671,11 +5431,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
- }
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5703,7 +5463,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(size, size);
__ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr);
+ }
__ StoreToSafepointRegisterSlot(r0, result);
}
@@ -5737,7 +5503,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -5748,8 +5513,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
+ __ Push(r3, r2, r1);
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(r3, r2, r1);
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
@@ -5762,170 +5529,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(r2));
- ASSERT(!result.is(r2));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ add(r2, result, Operand(elements_offset));
- } else {
- __ ldr(r2, FieldMemOperand(source, i));
- }
- __ str(r2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(r2, Operand(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ str(r2, FieldMemOperand(result, object_size));
- __ str(source, FieldMemOperand(result, object_size + kPointerSize));
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ ldr(r2, FieldMemOperand(source, i));
- __ str(r2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(r2, Operand(value_low));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ mov(r2, Operand(value_high));
- __ str(r2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ add(r2, result, Operand(*offset));
- __ str(r2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- } else {
- __ mov(r2, Operand(value));
- __ str(r2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- // Load map into r2.
- __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- __ cmp(r2, Operand(boilerplate_elements_kind));
- DeoptimizeIf(ne, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ push(r0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
@@ -6002,17 +5605,8 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&allocated);
// Copy the content into the newly allocated memory.
- // (Unroll copy loop once for better throughput).
- for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
- __ ldr(r3, FieldMemOperand(r1, i));
- __ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
- __ str(r3, FieldMemOperand(r0, i));
- __ str(r2, FieldMemOperand(r0, i + kPointerSize));
- }
- if ((size % (2 * kPointerSize)) != 0) {
- __ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
- __ str(r3, FieldMemOperand(r0, size - kPointerSize));
- }
+ __ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(),
+ size / kPointerSize);
}
@@ -6022,7 +5616,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
+ FastNewClosureStub stub(shared_info->language_mode(),
+ shared_info->is_generator());
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
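Several hunks above (DoStoreKeyedFixedDoubleArray, DoNumberTagD) drop the explicit compare-and-load of the canonical NaN constant and instead rely on VFPCanonicalizeNaN, a vsub against zero executed with the FPSCR default-NaN bit set, which maps every NaN input to the single canonical quiet NaN and leaves other values unchanged. A host-side sketch of the bit pattern that canonicalization targets, assuming the usual IEEE-754 quiet-NaN encoding:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>
    #include <limits>

    // Canonical quiet NaN for a double: sign 0, all-ones exponent, top
    // mantissa bit set, remaining mantissa bits zero.
    const uint64_t kCanonicalNaNBits = 0x7FF8000000000000ULL;

    uint64_t BitsOf(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      return bits;
    }

    int main() {
      // On common toolchains quiet_NaN() already uses this encoding; ARM's
      // default-NaN mode guarantees VFP results use it for any NaN operand.
      std::printf("%016llx\n",
                  (unsigned long long)BitsOf(std::numeric_limits<double>::quiet_NaN()));
      std::printf("%016llx\n", (unsigned long long)kCanonicalNaNBits);
    }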
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 686241db7..3e24dae54 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -137,7 +137,7 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
@@ -294,17 +294,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
diff --git a/deps/v8/src/arm/lithium-gap-resolver-arm.cc b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
index a65ab7e7d..596d58f47 100644
--- a/deps/v8/src/arm/lithium-gap-resolver-arm.cc
+++ b/deps/v8/src/arm/lithium-gap-resolver-arm.cc
@@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
- // ip is overwritten while saving the value to the destination.
+ // ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
@@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), VFP2);
- MemOperand source_operand = cgen_->ToMemOperand(source);
+ MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index bacf570c3..7df0c0a1f 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatureScope scope(this, VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
@@ -775,6 +773,23 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
+void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
+ // If needed, restore wanted bits of FPSCR.
+ Label fpscr_done;
+ vmrs(scratch);
+ tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
+ b(ne, &fpscr_done);
+ orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
+ vmsr(scratch);
+ bind(&fpscr_done);
+}
+
+void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond) {
+ vsub(value, value, kDoubleRegZero, cond);
+}
+
+
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
@@ -811,7 +826,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
- ASSERT(IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -873,7 +887,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
- CpuFeatureScope scope(this, VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
CheckFor32DRegs(ip);
@@ -938,7 +951,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
- CpuFeatureScope scope(this, VFP2);
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
sub(r3, fp,
@@ -975,7 +987,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@@ -1402,7 +1413,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
@@ -1421,7 +1431,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code. Due to the fact
// that ip is clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
@@ -1991,7 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch4,
Label* fail,
int elements_offset) {
- Label smi_value, maybe_nan, have_double_value, is_nan, done;
+ Label smi_value, store;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -2005,73 +2014,28 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
fail,
DONT_DO_SMI_CHECK);
- // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
- // in the exponent.
- mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
- ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
- cmp(exponent_reg, scratch1);
- b(ge, &maybe_nan);
-
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
-
- bind(&have_double_value);
- add(scratch1, elements_reg,
- Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
- str(exponent_reg, FieldMemOperand(scratch1, offset));
- jmp(&done);
-
- bind(&maybe_nan);
- // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
- // it's an Infinity, and the non-NaN code path applies.
- b(gt, &is_nan);
- ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
- cmp(mantissa_reg, Operand::Zero());
- b(eq, &have_double_value);
- bind(&is_nan);
- // Load canonical NaN for storing into the double array.
- uint64_t nan_int64 = BitCast<uint64_t>(
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
- mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
- jmp(&have_double_value);
+ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+ // Force a canonical NaN.
+ if (emit_debug_code()) {
+ vmrs(ip);
+ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
+ Assert(ne, "Default NaN mode not set");
+ }
+ VFPCanonicalizeNaN(d0);
+ b(&store);
bind(&smi_value);
+ Register untagged_value = scratch1;
+ SmiUntag(untagged_value, value_reg);
+ FloatingPointHelper::ConvertIntToDouble(
+ this, untagged_value, FloatingPointHelper::kVFPRegisters, d0,
+ mantissa_reg, exponent_reg, scratch4, s2);
+
+ bind(&store);
add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
- add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- // scratch1 is now effective address of the double element
-
- FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
-
- Register untagged_value = elements_reg;
- SmiUntag(untagged_value, value_reg);
- FloatingPointHelper::ConvertIntToDouble(this,
- untagged_value,
- destination,
- d0,
- mantissa_reg,
- exponent_reg,
- scratch4,
- s2);
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatureScope scope(this, VFP2);
- vstr(d0, scratch1, 0);
- } else {
- str(mantissa_reg, MemOperand(scratch1, 0));
- str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
- }
- bind(&done);
+ vstr(d0, FieldMemOperand(scratch1,
+ FixedDoubleArray::kHeaderSize - elements_offset));
}
@@ -2425,9 +2389,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatureScope scope(this, VFP2);
-
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
@@ -2438,9 +2399,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatureScope scope(this, VFP2);
-
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@@ -2456,8 +2414,6 @@ void MacroAssembler::TryInt32Floor(Register result,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
- ASSERT(CpuFeatures::IsSupported(VFP2));
- CpuFeatureScope scope(this, VFP2);
Label negative, exception;
// Test for NaN and infinities.
@@ -2502,26 +2458,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source,
Register scratch,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(this, VFP2);
- vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
- ECMAToInt32VFP(result, double_scratch1, double_scratch2,
- scratch, input_high, input_low);
- } else {
- Ldrd(input_low, input_high,
- FieldMemOperand(source, HeapNumber::kValueOffset));
- ECMAToInt32NoVFP(result, scratch, input_high, input_low);
- }
+ vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
+ ECMAToInt32(result, double_scratch1, double_scratch2,
+ scratch, input_high, input_low);
}
-void MacroAssembler::ECMAToInt32VFP(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register input_high,
- Register input_low) {
- CpuFeatureScope scope(this, VFP2);
+void MacroAssembler::ECMAToInt32(Register result,
+ DwVfpRegister double_input,
+ DwVfpRegister double_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low) {
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@@ -2561,58 +2509,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result,
}
-void MacroAssembler::ECMAToInt32NoVFP(Register result,
- Register scratch,
- Register input_high,
- Register input_low) {
- ASSERT(!result.is(scratch));
- ASSERT(!result.is(input_high));
- ASSERT(!result.is(input_low));
- ASSERT(!scratch.is(input_high));
- ASSERT(!scratch.is(input_low));
- ASSERT(!input_high.is(input_low));
-
- Label both, out_of_range, negate, done;
-
- Ubfx(scratch, input_high,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // Load scratch with exponent.
- sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
- // If exponent is negative, 0 < input < 1, the result is 0.
- // If exponent is greater than or equal to 84, the 32 less significant
- // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
- // the result is 0.
- // This test also catch Nan and infinities which also return 0.
- cmp(scratch, Operand(84));
- // We do an unsigned comparison so negative numbers are treated as big
- // positive number and the two tests above are done in one test.
- b(hs, &out_of_range);
-
- // Load scratch with 20 - exponent.
- rsb(scratch, scratch, Operand(20), SetCC);
- b(mi, &both);
-
- // Test 0 and -0.
- bic(result, input_high, Operand(HeapNumber::kSignMask));
- orr(result, result, Operand(input_low), SetCC);
- b(eq, &done);
- // 0 <= exponent <= 20, shift only input_high.
- // Scratch contains: 20 - exponent.
- Ubfx(result, input_high,
- 0, HeapNumber::kMantissaBitsInTopWord);
- // Set the implicit 1 before the mantissa part in input_high.
- orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
- mov(result, Operand(result, LSR, scratch));
- b(&negate);
-
- bind(&both);
- // Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP.
- rsb(scratch, scratch, Operand(19));
- ECMAToInt32Tail(result, scratch, input_high, input_low,
- &out_of_range, &negate, &done);
-}
-
-
void MacroAssembler::ECMAToInt32Tail(Register result,
Register scratch,
Register input_high,
@@ -2715,10 +2611,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -3244,27 +3137,24 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
- RegList temps,
+ DwVfpRegister double_scratch,
+ SwVfpRegister single_scratch,
int field_count) {
- // At least one bit set in the first 15 registers.
- ASSERT((temps & ((1 << 15) - 1)) != 0);
- ASSERT((temps & dst.bit()) == 0);
- ASSERT((temps & src.bit()) == 0);
- // Primitive implementation using only one temporary register.
-
- Register tmp = no_reg;
- // Find a temp register in temps list.
- for (int i = 0; i < 15; i++) {
- if ((temps & (1 << i)) != 0) {
- tmp.set_code(i);
- break;
- }
+ int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
+ for (int i = 0; i < double_count; i++) {
+ vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
+ vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
}
- ASSERT(!tmp.is(no_reg));
- for (int i = 0; i < field_count; i++) {
- ldr(tmp, FieldMemOperand(src, i * kPointerSize));
- str(tmp, FieldMemOperand(dst, i * kPointerSize));
+ STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
+ STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
+
+ int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
+ if (remain != 0) {
+ vldr(single_scratch,
+ FieldMemOperand(src, (field_count - 1) * kPointerSize));
+ vstr(single_scratch,
+ FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
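
CopyFields now moves object fields through the supplied VFP scratch registers: as many 8-byte (double) chunks as fit, then a single 4-byte chunk when the field count is odd. A plain C++ sketch of that schedule, assuming 4-byte pointers as on ARM; names are illustrative:

    #include <cstring>

    void CopyFieldsSchedule(char* dst, const char* src, int field_count) {
      const int kPointerSize = 4;
      const int kDoubleSize = 8;
      int double_count = field_count / (kDoubleSize / kPointerSize);
      for (int i = 0; i < double_count; i++) {
        // Stands in for a vldr/vstr pair on a d-register.
        std::memcpy(dst + i * kDoubleSize, src + i * kDoubleSize, kDoubleSize);
      }
      if (field_count % (kDoubleSize / kPointerSize) != 0) {
        // Odd field count: copy the last word through an s-register.
        int tail = (field_count - 1) * kPointerSize;
        std::memcpy(dst + tail, src + tail, kPointerSize);
      }
    }
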
@@ -3463,7 +3353,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@@ -3474,7 +3363,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
DwVfpRegister dreg2) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@@ -3493,7 +3381,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
- ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);
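
With the non-VFP fallback gone, ECMAToInt32VFP and ECMAToInt32NoVFP collapse into a single ECMAToInt32 that implements the truncating conversion of ECMA-262 9.5 (ToInt32). A rough portable C++ model of those semantics, not the assembly routine itself:

    #include <cmath>
    #include <cstdint>

    int32_t ToInt32Model(double value) {
      // NaN, +/-Infinity and +/-0 all map to 0.
      if (std::isnan(value) || std::isinf(value) || value == 0) return 0;
      // Truncate toward zero, then reduce modulo 2^32 into [0, 2^32).
      double truncated = std::trunc(value);
      double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
      if (modulo < 0) modulo += 4294967296.0;
      uint32_t bits = static_cast<uint32_t>(modulo);
      // Reinterpret as signed 32-bit (wraps on two's-complement targets).
      return static_cast<int32_t>(bits);
    }
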
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 958fcacb3..86ae8f22d 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -460,6 +460,19 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
+ // Ensure that FPSCR contains values needed by JavaScript.
+ // We need the NaNModeControlBit to be sure that operations like
+ // vadd and vsub generate the Canonical NaN (if a NaN must be generated).
+  // In VFP3 it will always be the Canonical NaN.
+  // In VFP2 it will be either the Canonical NaN or its negated form.
+  // Having two possible values does not matter; the aim is to be sure
+  // we never generate the hole NaN.
+ void VFPEnsureFPSCRState(Register scratch);
+
+  // If the value is a NaN, canonicalize it; otherwise do nothing.
+ void VFPCanonicalizeNaN(const DwVfpRegister value,
+ const Condition cond = al);
+
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -743,7 +756,11 @@ class MacroAssembler: public Assembler {
Label* gc_required);
// Copies a fixed number of fields of heap objects from src to dst.
- void CopyFields(Register dst, Register src, RegList temps, int field_count);
+ void CopyFields(Register dst,
+ Register src,
+ DwVfpRegister double_scratch,
+ SwVfpRegister single_scratch,
+ int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@@ -969,20 +986,12 @@ class MacroAssembler: public Assembler {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
- void ECMAToInt32VFP(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Register scratch,
- Register input_high,
- Register input_low);
-
- // Performs a truncating conversion of a floating point number as used by
- // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
- // Exits with 'result' holding the answer.
- void ECMAToInt32NoVFP(Register result,
- Register scratch,
- Register input_high,
- Register input_low);
+ void ECMAToInt32(Register result,
+ DwVfpRegister double_input,
+ DwVfpRegister double_scratch,
+ Register scratch,
+ Register input_high,
+ Register input_low);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
@@ -1140,7 +1149,9 @@ class MacroAssembler: public Assembler {
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
+#ifdef __arm__
+ return OS::ArmUsingHardFloat();
+#elif USE_EABI_HARDFLOAT
return true;
#else
return false;
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index 2551e14e4..ad4d77df2 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -721,7 +721,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -773,6 +773,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
FPSCR_rounding_mode_ = RZ;
+ FPSCR_default_NaN_mode_ = true;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
@@ -902,8 +903,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
@@ -953,9 +954,9 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
- memcpy(&vfp_registers_[reg_index * register_size], buffer,
- register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
+ register_size * sizeof(vfp_registers_[0]));
}
@@ -967,64 +968,34 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
- memcpy(buffer, &vfp_registers_[register_size * reg_index],
- register_size * sizeof(vfp_registers_[0]));
- memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
+ register_size * sizeof(vfp_registers_[0]));
+ OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
-// For use in calls that take two double values, constructed either
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
// from r0-r3 or d0 and d1.
-void Simulator::GetFpArgs(double* x, double* y) {
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
+ *z = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
+ OS::MemCopy(buffer, registers_, sizeof(*x));
+ OS::MemCopy(x, buffer, sizeof(*x));
// Registers 2 and 3 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
- }
-}
-
-// For use in calls that take one double value, constructed either
-// from r0 and r1 or d0.
-void Simulator::GetFpArgs(double* x) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from r0 and r1 or d0 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (use_eabi_hardfloat()) {
- *x = vfp_registers_[0];
- *y = registers_[1];
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- // Registers 0 and 1 -> x.
- memcpy(buffer, registers_, sizeof(*x));
- memcpy(x, buffer, sizeof(*x));
- // Register 2 -> y.
- memcpy(buffer, registers_ + 2, sizeof(*y));
- memcpy(y, buffer, sizeof(*y));
+ OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
+ OS::MemCopy(y, buffer, sizeof(*y));
+ // Register 2 -> z.
+ OS::MemCopy(buffer, registers_ + 2, sizeof(*z));
+ OS::MemCopy(z, buffer, sizeof(*z));
}
}
@@ -1033,14 +1004,14 @@ void Simulator::GetFpArgs(double* x, int32_t* y) {
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to d0.
- memcpy(vfp_registers_, buffer, sizeof(buffer));
+ OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
- memcpy(registers_, buffer, sizeof(buffer));
+ OS::MemCopy(registers_, buffer, sizeof(buffer));
}
}
@@ -1619,12 +1590,12 @@ void Simulator::HandleVList(Instruction* instr) {
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
- memcpy(&d, data, 8);
+ OS::MemCopy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
- memcpy(data, &d, 8);
+ OS::MemCopy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
@@ -1647,10 +1618,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
@@ -1716,27 +1689,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
+ double dval0, dval1; // one or two double parameters
+    int32_t ival;         // zero or one integer parameter
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double dval0, dval1;
- int32_t ival;
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@@ -1748,22 +1721,54 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(r0, static_cast<int32_t>(iresult));
+ set_register(r1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t lo_res = static_cast<int32_t>(result);
- int32_t hi_res = static_cast<int32_t>(result >> 32);
- if (::v8::internal::FLAG_trace_sim) {
- PrintF("Returned %08x\n", lo_res);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
}
- set_register(r0, lo_res);
- set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
@@ -1864,6 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
+double Simulator::canonicalizeNaN(double value) {
+ return (FPSCR_default_NaN_mode_ && isnan(value)) ?
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
+}
+
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
@@ -2724,11 +2734,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vabs
double dm_value = get_double_from_d_register(vm);
double dd_value = fabs(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@@ -2744,6 +2756,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
@@ -2765,12 +2778,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
// vadd
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
@@ -2782,6 +2797,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Value() == 0x0)) {
// vmla, vmls
@@ -2799,9 +2815,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
- set_d_register_from_double(vd, dd_val - get_double_from_d_register(vd));
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
} else {
- set_d_register_from_double(vd, dd_val + get_double_from_d_register(vd));
+ set_d_register_from_double(
+ vd,
+ canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
@@ -2813,6 +2833,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
+ dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
@@ -2828,9 +2849,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
- memcpy(data, &dd_value, 8);
+ OS::MemCopy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
- memcpy(&dd_value, data, 8);
+ OS::MemCopy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
@@ -2846,6 +2867,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(z_flag_FPSCR_ << 30) |
(c_flag_FPSCR_ << 29) |
(v_flag_FPSCR_ << 28) |
+ (FPSCR_default_NaN_mode_ << 25) |
(inexact_vfp_flag_ << 4) |
(underflow_vfp_flag_ << 3) |
(overflow_vfp_flag_ << 2) |
@@ -2868,6 +2890,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
z_flag_FPSCR_ = (rt_value >> 30) & 1;
c_flag_FPSCR_ = (rt_value >> 29) & 1;
v_flag_FPSCR_ = (rt_value >> 28) & 1;
+ FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
inexact_vfp_flag_ = (rt_value >> 4) & 1;
underflow_vfp_flag_ = (rt_value >> 3) & 1;
overflow_vfp_flag_ = (rt_value >> 2) & 1;
@@ -3179,13 +3202,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
if (instr->HasL()) {
int32_t data[2];
double d = get_double_from_d_register(vm);
- memcpy(data, &d, 8);
+ OS::MemCopy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
- memcpy(&d, data, 8);
+ OS::MemCopy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
@@ -3208,13 +3231,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ReadW(address + 4, instr)
};
double val;
- memcpy(&val, data, 8);
+ OS::MemCopy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
int32_t data[2];
double val = get_double_from_d_register(vd);
- memcpy(data, &val, 8);
+ OS::MemCopy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
@@ -3437,9 +3460,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) {
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
+ OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
+ OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
}
CallInternal(entry);
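
The memcpy-to-OS::MemCopy substitutions throughout the simulator keep the original pattern intact: doubles move in and out of the 32-bit register arrays through a byte copy rather than a pointer cast, because the cast would violate C++ strict-aliasing rules. A minimal standalone version of that pattern (names illustrative):

    #include <cstdint>
    #include <cstring>

    // Reading two adjacent 32-bit register slots as one double via
    // reinterpret_cast would break strict aliasing; copying the bytes is
    // well defined and compiles down to the same loads.
    double DoubleFromRegisterPair(const int32_t regs[2]) {
      double result;  // sizeof(double) == 2 * sizeof(int32_t)
      std::memcpy(&result, regs, sizeof(result));
      return result;
    }
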
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index b918ecf96..45ae999b5 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -274,6 +274,7 @@ class Simulator {
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
+ inline double canonicalizeNaN(double value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
@@ -347,10 +348,8 @@ class Simulator {
void* external_function,
v8::internal::ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
@@ -381,6 +380,7 @@ class Simulator {
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
VFPRoundingMode FPSCR_rounding_mode_;
+ bool FPSCR_default_NaN_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;
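
The new FPSCR_default_NaN_mode_ member models the VFP "DN" (default NaN) bit, which the vmrs/vmsr handling in simulator-arm.cc packs into and out of bit 25 of the FPSCR word (see ARM DDI 0406). A small sketch of that bit manipulation, using the same bit position as the simulator:

    #include <cstdint>

    const int kDefaultNaNModeBit = 25;  // FPSCR "DN" bit

    bool DefaultNaNModeEnabled(uint32_t fpscr) {
      return ((fpscr >> kDefaultNaNModeBit) & 1) != 0;
    }

    uint32_t WithDefaultNaNMode(uint32_t fpscr, bool enabled) {
      uint32_t mask = 1u << kDefaultNaNModeBit;
      return enabled ? (fpscr | mask) : (fpscr & ~mask);
    }
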
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index f2d45e190..f22acb470 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -417,30 +417,48 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Generate StoreField code, value is passed in r0 register.
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ mov(scratch, Operand(cell));
+ __ ldr(scratch,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+ __ cmp(scratch, ip);
+ __ b(ne, miss);
+}
+
+
+// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- Label* miss_restore_name) {
+void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label,
+ Label* miss_restore_name) {
// r0 : value
Label exit;
// Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
+ DO_SMI_CHECK, REQUIRE_EXACT_MAP);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -448,7 +466,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
@@ -466,12 +484,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
- if (lookup->holder() == *object &&
- !holder->HasFastProperties() &&
- !holder->IsJSGlobalProxy() &&
- !holder->IsJSGlobalObject()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ if (lookup->holder() == *object) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm,
+ Handle<GlobalObject>(GlobalObject::cast(holder)),
+ name,
+ scratch1,
+ miss_restore_name);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ }
}
}
@@ -480,7 +504,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -494,33 +518,113 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- int index;
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Operand(transition));
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ // Update the map of the object.
+ __ mov(scratch1, Operand(transition));
+ __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
+ offset,
name_reg,
+ scratch1,
kLRHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
+ kDontSaveFPRegs);
} else {
- index = lookup->GetFieldIndex().field_index();
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ ldr(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ str(value_reg, FieldMemOperand(scratch1, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kLRHasNotBeenSaved,
+ kDontSaveFPRegs);
}
+ // Return the value (register r0).
+ ASSERT(value_reg.is(r0));
+ __ bind(&exit);
+ __ Ret();
+}
+
+
+// Generate StoreField code, value is passed in r0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // r0 : value
+ Label exit;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -926,26 +1030,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ mov(scratch, Operand(cell));
- __ ldr(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ cmp(scratch, ip);
- __ b(ne, miss);
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -975,66 +1059,11 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- __ vmov(s0, ival);
- __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
- __ vcvt_f32_s32(s0, s0);
- __ vstr(s0, scratch1, 0);
- } else {
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
- // Negate value if it is negative.
- __ rsb(ival, ival, Operand::Zero(), LeaveCC, ne);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ cmp(ival, Operand(1));
- __ b(gt, &not_special);
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
- __ b(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ CountLeadingZeros(zeros, ival, scratch1);
-
- // Compute exponent and or it into the exponent register.
- __ rsb(scratch1,
- zeros,
- Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
-
- __ orr(fval,
- fval,
- Operand(scratch1, LSL, kBinary32ExponentShift));
-
- // Shift up the source chopping the top bit off.
- __ add(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ mov(ival, Operand(ival, LSL, zeros));
- // And the top (top 20 bits).
- __ orr(fval,
- fval,
- Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
-
- __ bind(&done);
- __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
- }
+ Register scratch1) {
+ __ vmov(s0, ival);
+ __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
+ __ vcvt_f32_s32(s0, s0);
+ __ vstr(s0, scratch1, 0);
}
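
With VFP now a baseline requirement, StoreIntAsFloat reduces to a hardware int-to-float conversion plus a 32-bit store; the removed fallback assembled the IEEE-754 single-precision bits by hand. Roughly the C++ equivalent of the retained path (names illustrative):

    #include <cstdint>

    void StoreIntAsFloatSketch(float* base, uint32_t word_offset, int32_t ival) {
      base[word_offset] = static_cast<float>(ival);  // vcvt_f32_s32 + vstr
    }
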
@@ -1225,7 +1254,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
Handle<GlobalObject> global) {
Label miss;
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1233,13 +1262,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- if (!last->HasFastProperties()) {
- __ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), Operand(isolate()->factory()->null_value()));
- __ b(ne, &miss);
- }
-
HandlerFrontendFooter(success, &miss);
}
@@ -1599,7 +1621,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
- r4, r0, elements, r3, r5, r2, r9,
+ r4, r0, elements, r5, r2, r3, r9,
&call_builtin, argc * kDoubleSize);
// Save new length.
@@ -2089,11 +2111,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(VFP2)) {
- return Handle<Code>::null();
- }
-
- CpuFeatureScope scope_vfp2(masm(), VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -3133,36 +3150,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
@@ -3170,29 +3157,23 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* fail) {
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ sub(ip, key, Operand(kHeapObjectTag));
- __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
- __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
- __ b(ne, fail);
- __ TrySmiTag(scratch0, fail, scratch1);
- __ mov(key, scratch0);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
+ Label key_ok;
+ // Check for smi or a smi inside a heap number. We convert the heap
+ // number and check if the conversion is exact and fits into the smi
+ // range.
+ __ JumpIfSmi(key, &key_ok);
+ __ CheckMap(key,
+ scratch0,
+ Heap::kHeapNumberMapRootIndex,
+ fail,
+ DONT_DO_SMI_CHECK);
+ __ sub(ip, key, Operand(kHeapObjectTag));
+ __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
+ __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
+ __ b(ne, fail);
+ __ TrySmiTag(scratch0, fail, scratch1);
+ __ mov(key, scratch0);
+ __ bind(&key_ok);
}
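
GenerateSmiKeyCheck now always takes the VFP path: a heap-number key is accepted only if it converts to an int32 exactly and that int32 still fits in a smi. A hedged C++ model of the acceptance test, assuming the 31-bit smi range used on ARM; names are illustrative:

    #include <cstdint>

    bool KeyFitsSmi(double key, int32_t* out) {
      // 31-bit smi range; the comparison also rejects NaN.
      if (!(key >= -1073741824.0 && key <= 1073741823.0)) return false;
      int32_t as_int = static_cast<int32_t>(key);
      if (static_cast<double>(as_int) != key) return false;  // inexact
      *out = as_int;
      return true;
    }
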
@@ -3262,28 +3243,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(r4, key);
- StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9);
+ StoreIntAsFloat(masm, r3, r4, r5, r7);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(VFP2)) {
- destination = FloatingPointHelper::kVFPRegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ destination = FloatingPointHelper::kVFPRegisters;
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
r4, s2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kVFPRegisters) {
- CpuFeatureScope scope(masm, VFP2);
- __ vstr(d0, r3, 0);
- } else {
- __ str(r6, MemOperand(r3, 0));
- __ str(r7, MemOperand(r3, Register::kSizeInBytes));
- }
+ __ vstr(d0, r3, 0);
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3313,201 +3284,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(masm, VFP2);
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- // vldr requires offset to be a multiple of 4 so we can not
- // include -kHeapObjectTag into it.
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 1));
- __ vcvt_f32_f64(s0, d0);
- __ vstr(s0, r5, 0);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sub(r5, r0, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ add(r5, r3, Operand(key, LSL, 2));
- __ vstr(d0, r5, 0);
- } else {
- // Hoisted load. vldr requires offset to be a multiple of 4 so we can
- // not include -kHeapObjectTag into it.
- __ sub(r5, value, Operand(kHeapObjectTag));
- __ vldr(d0, r5, HeapNumber::kValueOffset);
- __ ECMAToInt32VFP(r5, d0, d1, r6, r7, r9);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+      // vldr requires offset to be a multiple of 4 so we cannot
+      // include -kHeapObjectTag into it.
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(key, LSL, 1));
+ __ vcvt_f32_f64(s0, d0);
+ __ vstr(s0, r5, 0);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sub(r5, r0, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ add(r5, r3, Operand(key, LSL, 2));
+ __ vstr(d0, r5, 0);
} else {
- // VFP3 is not available do manual conversions.
- __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ b(eq, &nan_or_infinity_or_zero);
-
- __ teq(r9, Operand(r7));
- __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
- __ b(eq, &nan_or_infinity_or_zero);
-
- // Rebias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ add(r9,
- r9,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ cmp(r9, Operand(kBinary32MaxExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
- __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
- __ b(gt, &done);
-
- __ cmp(r9, Operand(kBinary32MinExponent));
- __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
- __ b(lt, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
- __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
-
- __ bind(&done);
- __ str(r5, MemOperand(r3, key, LSL, 1));
- // Entry registers are intact, r0 holds the value which is the return
- // value.
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ and_(r7, r5, Operand(HeapNumber::kSignMask));
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r9, r9, r7);
- __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
- __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
- __ b(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ add(r7, r3, Operand(key, LSL, 2));
- // r7: effective address of destination element.
- __ str(r6, MemOperand(r7, 0));
- __ str(r5, MemOperand(r7, Register::kSizeInBytes));
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ mov(r7, Operand(HeapNumber::kExponentMask));
- __ and_(r9, r5, Operand(r7), SetCC);
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- __ teq(r9, Operand(r7));
- __ mov(r5, Operand::Zero(), LeaveCC, eq);
- __ b(eq, &done);
-
- // Unbias exponent.
- __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
- __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
- // If exponent is negative then result is 0.
- __ mov(r5, Operand::Zero(), LeaveCC, mi);
- __ b(mi, &done);
-
- // If exponent is too big then result is minimal value.
- __ cmp(r9, Operand(meaningfull_bits - 1));
- __ mov(r5, Operand(min_value), LeaveCC, ge);
- __ b(ge, &done);
-
- __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
- __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
- __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
- __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
- __ b(pl, &sign);
-
- __ rsb(r9, r9, Operand::Zero());
- __ mov(r5, Operand(r5, LSL, r9));
- __ rsb(r9, r9, Operand(meaningfull_bits));
- __ orr(r5, r5, Operand(r6, LSR, r9));
-
- __ bind(&sign);
- __ teq(r7, Operand::Zero());
- __ rsb(r5, r5, Operand::Zero(), LeaveCC, ne);
-
- __ bind(&done);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ strb(r5, MemOperand(r3, key, LSR, 1));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ strh(r5, MemOperand(r3, key, LSL, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ str(r5, MemOperand(r3, key, LSL, 1));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+      // Hoisted load. vldr requires offset to be a multiple of 4, so we
+      // cannot include -kHeapObjectTag into it.
+ __ sub(r5, value, Operand(kHeapObjectTag));
+ __ vldr(d0, r5, HeapNumber::kValueOffset);
+ __ ECMAToInt32(r5, d0, d1, r6, r7, r9);
+
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ strb(r5, MemOperand(r3, key, LSR, 1));
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ strh(r5, MemOperand(r3, key, LSL, 0));
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ str(r5, MemOperand(r3, key, LSL, 1));
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
}
+
+ // Entry registers are intact, r0 holds the value which is the return
+ // value.
+ __ Ret();
}
// Slow case, key and receiver still in r0 and r1.
@@ -3757,9 +3586,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// All registers after this are overwritten.
elements_reg,
scratch1,
- scratch2,
scratch3,
scratch4,
+ scratch2,
&transition_elements_kind);
__ Ret();
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 936d00801..54f0b486e 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -1017,7 +1017,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1068,9 +1068,10 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
+
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
@@ -1111,7 +1112,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1154,7 +1155,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1196,7 +1197,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
- } else if (!IS_SPEC_OBJECT(receiver)) {
+ } else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@@ -1453,8 +1454,10 @@ function ArrayIsArray(obj) {
// -------------------------------------------------------------------
+
function SetUpArray() {
%CheckIsBootstrapping();
+
// Set up non-enumerable constructor property on the Array.prototype
// object.
%SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index deef28b1f..5bde8c538 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -191,11 +191,9 @@ CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
#if V8_TARGET_ARCH_ARM
- // VFP2 and ARMv7 are implied by VFP3.
+ // ARMv7 is implied by VFP3.
if (f == VFP3) {
- mask |=
- static_cast<uint64_t>(1) << VFP2 |
- static_cast<uint64_t>(1) << ARMv7;
+ mask |= static_cast<uint64_t>(1) << ARMv7;
}
#endif
assembler_->set_enabled_cpu_features(old_enabled_ | mask);
@@ -1191,6 +1189,20 @@ ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
}
+ExternalReference ExternalReference::old_data_space_allocation_top_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldDataSpaceAllocationTopAddress());
+}
+
+
+ExternalReference ExternalReference::old_data_space_allocation_limit_address(
+ Isolate* isolate) {
+ return ExternalReference(
+ isolate->heap()->OldDataSpaceAllocationLimitAddress());
+}
+
+
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index e26b5254d..381ae0a80 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -753,6 +753,10 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference old_pointer_space_allocation_limit_address(
Isolate* isolate);
+ static ExternalReference old_data_space_allocation_top_address(
+ Isolate* isolate);
+ static ExternalReference old_data_space_allocation_limit_address(
+ Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 7d7a5b247..e8b065c4e 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -509,6 +509,11 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
+ // If a dictionary map is found in the prototype chain before the actual
+ // target, a new target can always appear. In that case, bail out.
+ // TODO(verwaest): Alternatively a runtime negative lookup on the normal
+ // receiver or prototype could be added.
+ if (type->is_dictionary_map()) return false;
type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
@@ -534,7 +539,6 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
- if (!holder_->HasFastProperties()) return false;
type = Handle<Map>(holder()->map());
}
}
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index dddfc835f..b7331388f 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -1811,7 +1811,8 @@ class CountOperation: public Expression {
Token::Value op_;
bool is_prefix_ : 1;
bool is_monomorphic_ : 1;
- KeyedAccessStoreMode store_mode_: 4;
+  KeyedAccessStoreMode store_mode_ : 5;  // Windows treats enums as signed,
+                                         // so an extra bit is required.
Expression* expression_;
int pos_;
const BailoutId assignment_id_;
@@ -1953,7 +1954,8 @@ class Assignment: public Expression {
const BailoutId assignment_id_;
bool is_monomorphic_ : 1;
- KeyedAccessStoreMode store_mode_ : 4;
+ KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
+ // must have extra bit.
SmallMapList receiver_types_;
};
@@ -1962,21 +1964,25 @@ class Yield: public Expression {
public:
DECLARE_NODE_TYPE(Yield)
+ Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
bool is_delegating_yield() const { return is_delegating_yield_; }
virtual int position() const { return pos_; }
protected:
Yield(Isolate* isolate,
+ Expression* generator_object,
Expression* expression,
bool is_delegating_yield,
int pos)
: Expression(isolate),
+ generator_object_(generator_object),
expression_(expression),
is_delegating_yield_(is_delegating_yield),
pos_(pos) { }
private:
+ Expression* generator_object_;
Expression* expression_;
bool is_delegating_yield_;
int pos_;
@@ -2958,9 +2964,12 @@ class AstNodeFactory BASE_EMBEDDED {
VISIT_AND_RETURN(Assignment, assign)
}
- Yield* NewYield(Expression* expression, bool is_delegating_yield, int pos) {
- Yield* yield =
- new(zone_) Yield(isolate_, expression, is_delegating_yield, pos);
+ Yield* NewYield(Expression *generator_object,
+ Expression* expression,
+ bool is_delegating_yield,
+ int pos) {
+ Yield* yield = new(zone_) Yield(
+ isolate_, generator_object, expression, is_delegating_yield, pos);
VISIT_AND_RETURN(Yield, yield)
}
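
The widened store_mode_ bit-fields above exist because MSVC stores an enum bit-field in the enum's underlying signed int, so a 4-bit field can only hold 0..7 before values wrap negative; the fifth bit keeps the larger KeyedAccessStoreMode values positive. A minimal standalone sketch of the hazard, using a hypothetical enum rather than V8's real mode values:

// Sketch only: StoreMode and MODE_EIGHT are hypothetical stand-ins.
#include <cstdio>

enum StoreMode { MODE_ZERO = 0, MODE_EIGHT = 8 };

struct Packed {
  StoreMode narrow : 4;  // on MSVC this is a signed 4-bit field: 8 reads back as -8
  StoreMode wide   : 5;  // the extra bit keeps 8 representable
};

int main() {
  Packed p;
  p.narrow = MODE_EIGHT;  // compilers may warn that the field is too small
  p.wide = MODE_EIGHT;
  std::printf("narrow=%d wide=%d\n",
              static_cast<int>(p.narrow), static_cast<int>(p.wide));
  return 0;
}
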
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.cc b/deps/v8/src/atomicops_internals_x86_gcc.cc
index 181c20247..b5078cf4a 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.cc
+++ b/deps/v8/src/atomicops_internals_x86_gcc.cc
@@ -31,6 +31,7 @@
#include <string.h>
#include "atomicops.h"
+#include "platform.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
@@ -84,9 +85,9 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
- memcpy(vendor, &ebx, 4);
- memcpy(vendor + 4, &edx, 4);
- memcpy(vendor + 8, &ecx, 4);
+ v8::internal::OS::MemCopy(vendor, &ebx, 4);
+ v8::internal::OS::MemCopy(vendor + 4, &edx, 4);
+ v8::internal::OS::MemCopy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax
diff --git a/deps/v8/src/atomicops_internals_x86_gcc.h b/deps/v8/src/atomicops_internals_x86_gcc.h
index 6e55b5018..e58d598fb 100644
--- a/deps/v8/src/atomicops_internals_x86_gcc.h
+++ b/deps/v8/src/atomicops_internals_x86_gcc.h
@@ -168,7 +168,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
-#if defined(__x86_64__)
+#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.
diff --git a/deps/v8/src/bignum.cc b/deps/v8/src/bignum.cc
index 9436322ed..c8b61eef5 100644
--- a/deps/v8/src/bignum.cc
+++ b/deps/v8/src/bignum.cc
@@ -735,6 +735,13 @@ void Bignum::BigitsShiftLeft(int shift_amount) {
void Bignum::SubtractTimes(const Bignum& other, int factor) {
+#ifdef DEBUG
+ Bignum a, b;
+ a.AssignBignum(*this);
+ b.AssignBignum(other);
+ b.MultiplyByUInt32(factor);
+ a.SubtractBignum(b);
+#endif
ASSERT(exponent_ <= other.exponent_);
if (factor < 3) {
for (int i = 0; i < factor; ++i) {
@@ -758,9 +765,9 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
Chunk difference = bigits_[i] - borrow;
bigits_[i] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
- ++i;
}
Clamp();
+ ASSERT(Bignum::Equal(a, *this));
}
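
The new #ifdef DEBUG block copies the operands, recomputes this - other * factor with the general-purpose bignum routines, and the ASSERT added after Clamp() checks that the optimized in-place loop produced the same value. A minimal sketch of that check-against-a-reference pattern, using plain integers instead of bignums (names are illustrative):

#include <cassert>
#include <cstdio>

static int SubtractTimesReference(int self, int other, int factor) {
  return self - other * factor;  // slow but obviously correct path
}

static int SubtractTimesOptimized(int self, int other, int factor) {
  for (int i = 0; i < factor; ++i) self -= other;  // the "fast" path under test
  return self;
}

int main() {
  int self = 100, other = 7, factor = 3;
#ifdef DEBUG
  const int expected = SubtractTimesReference(self, other, factor);
#endif
  const int result = SubtractTimesOptimized(self, other, factor);
#ifdef DEBUG
  assert(result == expected);  // mirrors ASSERT(Bignum::Equal(a, *this))
#endif
  std::printf("result=%d\n", result);
  return 0;
}
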
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index f57a1f6fd..12f0cdac6 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -199,6 +199,8 @@ class Genesis BASE_EMBEDDED {
const char* name,
ElementsKind elements_kind);
bool InstallNatives();
+
+ void InstallTypedArray(const char* name);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@@ -303,14 +305,11 @@ Handle<Context> Bootstrapper::CreateEnvironment(
v8::ExtensionConfiguration* extensions) {
HandleScope scope(isolate_);
Genesis genesis(isolate_, global_object, global_template, extensions);
- if (!genesis.result().is_null()) {
- Handle<Object> ctx(isolate_->global_handles()->Create(*genesis.result()));
- Handle<Context> env = Handle<Context>::cast(ctx);
- if (InstallExtensions(env, extensions)) {
- return env;
- }
+ Handle<Context> env = genesis.result();
+ if (env.is_null() || !InstallExtensions(env, extensions)) {
+ return Handle<Context>();
}
- return Handle<Context>();
+ return scope.CloseAndEscape(env);
}
@@ -477,25 +476,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
- Handle<Map> object_prototype_map =
- factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- Handle<DescriptorArray> prototype_descriptors(
- factory->NewDescriptorArray(0, 1));
- DescriptorArray::WhitenessWitness witness(*prototype_descriptors);
-
- Handle<Foreign> object_prototype(
- factory->NewForeign(&Accessors::ObjectPrototype));
- PropertyAttributes attribs = static_cast<PropertyAttributes>(DONT_ENUM);
- object_prototype_map->set_instance_descriptors(*prototype_descriptors);
-
- { // Add __proto__.
- CallbacksDescriptor d(heap->proto_string(), *object_prototype, attribs);
- object_prototype_map->AppendDescriptor(&d, witness);
- }
-
- Handle<JSObject> prototype = factory->NewJSObjectFromMap(
- object_prototype_map,
+ Handle<JSObject> prototype = factory->NewJSObject(
+ isolate->object_function(),
TENURED);
+
native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
}
@@ -1276,6 +1260,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
+void Genesis::InstallTypedArray(const char* name) {
+ Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+ InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
+ JSTypedArray::kSize, isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
+}
+
+
void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
@@ -1293,33 +1285,80 @@ void Genesis::InitializeExperimentalGlobal() {
if (FLAG_harmony_collections) {
{ // -- S e t
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
}
{ // -- M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
}
{ // -- W e a k M a p
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
- prototype, Builtins::kIllegal, true);
+ isolate()->initial_object_prototype(),
+ Builtins::kIllegal, true);
}
}
if (FLAG_harmony_typed_arrays) {
- { // -- A r r a y B u f f e r
- Handle<JSObject> prototype =
- factory()->NewJSObject(isolate()->object_function(), TENURED);
+ { // -- A r r a y B u f f e r
InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
- JSArrayBuffer::kSize, prototype,
+ JSArrayBuffer::kSize,
+ isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
+ {
+ // -- T y p e d A r r a y s
+ InstallTypedArray("__Int8Array");
+ InstallTypedArray("__Uint8Array");
+ InstallTypedArray("__Int16Array");
+ InstallTypedArray("__Uint16Array");
+ InstallTypedArray("__Int32Array");
+ InstallTypedArray("__Uint32Array");
+ InstallTypedArray("__Float32Array");
+ InstallTypedArray("__Float64Array");
+ }
+ }
+
+ if (FLAG_harmony_generators) {
+ // Create generator meta-objects and install them on the builtins object.
+ Handle<JSObject> builtins(native_context()->builtins());
+ Handle<JSObject> generator_object_prototype =
+ factory()->NewJSObject(isolate()->object_function(), TENURED);
+ Handle<JSFunction> generator_function_prototype =
+ InstallFunction(builtins, "GeneratorFunctionPrototype",
+ JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
+ generator_object_prototype, Builtins::kIllegal,
+ false);
+ InstallFunction(builtins, "GeneratorFunction",
+ JS_FUNCTION_TYPE, JSFunction::kSize,
+ generator_function_prototype, Builtins::kIllegal,
+ false);
+
+ // Create maps for generator functions and their prototypes. Store those
+ // maps in the native context.
+ Handle<Map> function_map(native_context()->function_map());
+ Handle<Map> generator_function_map = factory()->CopyMap(function_map);
+ generator_function_map->set_prototype(*generator_function_prototype);
+ native_context()->set_generator_function_map(*generator_function_map);
+
+ Handle<Map> strict_mode_function_map(
+ native_context()->strict_mode_function_map());
+ Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
+ strict_mode_function_map);
+ strict_mode_generator_function_map->set_prototype(
+ *generator_function_prototype);
+ native_context()->set_strict_mode_generator_function_map(
+ *strict_mode_generator_function_map);
+
+ Handle<Map> object_map(native_context()->object_function()->initial_map());
+ Handle<Map> generator_object_prototype_map = factory()->CopyMap(
+ object_map, 0);
+ generator_object_prototype_map->set_prototype(
+ *generator_object_prototype);
+ native_context()->set_generator_object_prototype_map(
+ *generator_object_prototype_map);
}
}
@@ -1933,6 +1972,11 @@ bool Genesis::InstallExperimentalNatives() {
"native typedarray.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
+ if (FLAG_harmony_generators &&
+ strcmp(ExperimentalNatives::GetScriptName(i).start(),
+ "native generator.js") == 0) {
+ if (!CompileExperimentalBuiltin(isolate(), i)) return false;
+ }
}
InstallExperimentalNativeFunctions();
diff --git a/deps/v8/src/builtins-decls.h b/deps/v8/src/builtins-decls.h
new file mode 100644
index 000000000..beb5dd1e8
--- /dev/null
+++ b/deps/v8/src/builtins-decls.h
@@ -0,0 +1,40 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_BUILTINS_DECLS_H_
+#define V8_BUILTINS_DECLS_H_
+
+#include "arguments.h"
+
+namespace v8 {
+namespace internal {
+
+DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
+
+} } // namespace v8::internal
+
+#endif // V8_BUILTINS_DECLS_H_
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index f8d562b34..56c0501e5 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -125,23 +125,31 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
-#define BUILTIN(name) \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
- name##ArgumentsType args, Isolate* isolate); \
- MUST_USE_RESULT static MaybeObject* Builtin_##name( \
- name##ArgumentsType args, Isolate* isolate) { \
- ASSERT(isolate == Isolate::Current()); \
- args.Verify(); \
- return Builtin_Impl_##name(args, isolate); \
- } \
- MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+#define BUILTIN(name) \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ MUST_USE_RESULT static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ ASSERT(isolate == Isolate::Current()); \
+ args.Verify(); \
+ return Builtin_Impl_##name(args, isolate); \
+ } \
+ MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
-#define BUILTIN(name) \
- static MaybeObject* Builtin_##name(name##ArgumentsType args, Isolate* isolate)
-
+#define BUILTIN(name) \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate); \
+ static MaybeObject* Builtin_##name( \
+ int args_length, Object** args_object, Isolate* isolate) { \
+ name##ArgumentsType args(args_length, args_object); \
+ return Builtin_impl##name(args, isolate); \
+ } \
+ static MaybeObject* Builtin_impl##name( \
+ name##ArgumentsType args, Isolate* isolate)
#endif
@@ -323,9 +331,9 @@ static void MoveDoubleElements(FixedDoubleArray* dst,
int src_index,
int len) {
if (len == 0) return;
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kDoubleSize);
+ OS::MemMove(dst->data_start() + dst_index,
+ src->data_start() + src_index,
+ len * kDoubleSize);
}
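
The reworked BUILTIN macros change the builtin entry points from taking a ready-made Arguments object to taking a raw (length, pointer) pair and constructing the typed view themselves, in both debug and release builds. A standalone sketch of that wrapper pattern; the Arguments class and the void return type here are simplifications, not V8's real declarations:

#include <cstdio>

class Object;  // opaque element type, as seen by the trampoline

class Arguments {
 public:
  Arguments(int length, Object** arguments)
      : length_(length), arguments_(arguments) {}
  int length() const { return length_; }
 private:
  int length_;
  Object** arguments_;
};

#define BUILTIN_SKETCH(name)                                                \
  static void Builtin_Impl_##name(Arguments args);                         \
  static void Builtin_##name(int args_length, Object** args_object) {      \
    Arguments args(args_length, args_object); /* build the typed view */   \
    Builtin_Impl_##name(args);                                              \
  }                                                                         \
  static void Builtin_Impl_##name(Arguments args)

BUILTIN_SKETCH(Example) {
  std::printf("builtin called with %d arguments\n", args.length());
}

int main() {
  Builtin_Example(2, NULL);
  return 0;
}
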
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index 12ed56af7..ee607ad30 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -274,8 +274,6 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
-MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
-
class BuiltinFunctionTable;
class ObjectVisitor;
diff --git a/deps/v8/src/code-stubs-hydrogen.cc b/deps/v8/src/code-stubs-hydrogen.cc
index ee903aef1..02c009188 100644
--- a/deps/v8/src/code-stubs-hydrogen.cc
+++ b/deps/v8/src/code-stubs-hydrogen.cc
@@ -147,6 +147,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
AddSimulate(BailoutId::StubEntry());
+ NoObservableSideEffectsScope no_effects(this);
+
HValue* return_value = BuildCodeStub();
// We might have extra expressions to pop from the stack in addition to the
@@ -189,6 +191,70 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
template <>
+HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+ AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
+ FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
+ int length = casted_stub()->length();
+
+ HInstruction* boilerplate =
+ AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
+ GetParameter(1),
+ NULL,
+ FAST_ELEMENTS));
+
+ CheckBuilder builder(this);
+ builder.CheckNotUndefined(boilerplate);
+
+ if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
+ HValue* elements =
+ AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+
+ IfBuilder if_fixed_cow(this);
+ if_fixed_cow.BeginIfMapEquals(elements, factory->fixed_cow_array_map());
+ environment()->Push(BuildCloneShallowArray(context(),
+ boilerplate,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ 0/*copy-on-write*/));
+ if_fixed_cow.BeginElse();
+
+ IfBuilder if_fixed(this);
+ if_fixed.BeginIfMapEquals(elements, factory->fixed_array_map());
+ environment()->Push(BuildCloneShallowArray(context(),
+ boilerplate,
+ alloc_site_mode,
+ FAST_ELEMENTS,
+ length));
+ if_fixed.BeginElse();
+
+ environment()->Push(BuildCloneShallowArray(context(),
+ boilerplate,
+ alloc_site_mode,
+ FAST_DOUBLE_ELEMENTS,
+ length));
+ } else {
+ ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
+ environment()->Push(BuildCloneShallowArray(context(),
+ boilerplate,
+ alloc_site_mode,
+ elements_kind,
+ length));
+ }
+
+ return environment()->Pop();
+}
+
+
+Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
+ CodeStubGraphBuilder<FastCloneShallowArrayStub> builder(this);
+ LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+ return chunk->Codegen(Code::COMPILED_STUB);
+}
+
+
+template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
@@ -230,7 +296,6 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
factory->empty_string(),
value,
true, i));
- AddSimulate(BailoutId::StubEntry());
}
builder.End();
@@ -264,7 +329,6 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, casted_stub()->store_mode(), Representation::Tagged());
- AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
return GetParameter(2);
}
@@ -308,7 +372,7 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
HValue* new_elements =
- BuildAllocateElements(context(), to_kind, elements_length);
+ BuildAllocateAndInitializeElements(context(), to_kind, elements_length);
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
@@ -320,13 +384,11 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
factory->elements_field_string(),
new_elements, true,
JSArray::kElementsOffset));
- AddSimulate(BailoutId::StubEntry());
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
map, true, JSArray::kMapOffset));
- AddSimulate(BailoutId::StubEntry());
return js_array;
}
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index eff0f7f1c..ad418d69c 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -619,8 +619,10 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
- StubFailureTrampolineStub(NOT_JS_FUNCTION_STUB_MODE).GetCode(isolate);
- StubFailureTrampolineStub(JS_FUNCTION_STUB_MODE).GetCode(isolate);
+ StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
+ StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
+ stub1.GetCode(isolate)->set_is_pregenerated(true);
+ stub2.GetCode(isolate)->set_is_pregenerated(true);
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index 99ff5154e..60c4fb9bd 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -393,17 +393,24 @@ class ToNumberStub: public PlatformCodeStub {
class FastNewClosureStub : public PlatformCodeStub {
public:
- explicit FastNewClosureStub(LanguageMode language_mode)
- : language_mode_(language_mode) { }
+ explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
+ : language_mode_(language_mode),
+ is_generator_(is_generator) { }
void Generate(MacroAssembler* masm);
private:
+ class StrictModeBits: public BitField<bool, 0, 1> {};
+ class IsGeneratorBits: public BitField<bool, 1, 1> {};
+
Major MajorKey() { return FastNewClosure; }
- int MinorKey() { return language_mode_ == CLASSIC_MODE
- ? kNonStrictMode : kStrictMode; }
+ int MinorKey() {
+ return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
+ IsGeneratorBits::encode(is_generator_);
+ }
LanguageMode language_mode_;
+ bool is_generator_;
};
@@ -443,7 +450,7 @@ class FastNewBlockContextStub : public PlatformCodeStub {
};
-class FastCloneShallowArrayStub : public PlatformCodeStub {
+class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
@@ -467,7 +474,31 @@ class FastCloneShallowArrayStub : public PlatformCodeStub {
ASSERT_LE(length_, kMaximumClonedLength);
}
- void Generate(MacroAssembler* masm);
+ Mode mode() const { return mode_; }
+ int length() const { return length_; }
+ AllocationSiteMode allocation_site_mode() const {
+ return allocation_site_mode_;
+ }
+
+ ElementsKind ComputeElementsKind() const {
+ switch (mode()) {
+ case CLONE_ELEMENTS:
+ case COPY_ON_WRITE_ELEMENTS:
+ return FAST_ELEMENTS;
+ case CLONE_DOUBLE_ELEMENTS:
+ return FAST_DOUBLE_ELEMENTS;
+ case CLONE_ANY_ELEMENTS:
+ /*fall-through*/;
+ }
+ UNREACHABLE();
+ return LAST_ELEMENTS_KIND;
+ }
+
+ virtual Handle<Code> GenerateCode();
+
+ virtual void InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor);
private:
Mode mode_;
@@ -746,7 +777,7 @@ class BinaryOpStub: public PlatformCodeStub {
private:
Token::Value op_;
OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
+ bool platform_specific_bit_; // Indicates SSE3 on IA32.
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
@@ -1604,18 +1635,25 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
- : function_mode_(function_mode) {}
+ : fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
virtual bool IsPregenerated() { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
+ class FPRegisters: public BitField<bool, 0, 1> {};
+ class FunctionModeField: public BitField<StubFunctionMode, 1, 1> {};
+
Major MajorKey() { return StubFailureTrampoline; }
- int MinorKey() { return static_cast<int>(function_mode_); }
+ int MinorKey() {
+ return FPRegisters::encode(fp_registers_) |
+ FunctionModeField::encode(function_mode_);
+ }
void Generate(MacroAssembler* masm);
+ bool fp_registers_;
StubFunctionMode function_mode_;
DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
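
Both stubs above now derive MinorKey by packing independent fields with BitField-style encoders instead of casting a single value, which keeps the key stable as fields are added. An illustrative simplification of such a BitField<type, shift, size> helper (V8's real template carries more static checks):

#include <cstdio>

template <class T, int shift, int size>
struct BitField {
  static unsigned mask() { return ((1u << size) - 1u) << shift; }
  static unsigned encode(T value) { return static_cast<unsigned>(value) << shift; }
  static T decode(unsigned packed) { return static_cast<T>((packed & mask()) >> shift); }
};

// Mirrors StubFailureTrampolineStub's layout: bit 0 = FP registers, bit 1 = mode.
typedef BitField<bool, 0, 1> FPRegisters;
typedef BitField<int, 1, 1>  FunctionModeField;

int main() {
  unsigned minor_key = FPRegisters::encode(true) | FunctionModeField::encode(1);
  std::printf("key=%u fp=%d mode=%d\n", minor_key,
              FPRegisters::decode(minor_key),
              FunctionModeField::decode(minor_key));
  return 0;
}
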
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index c872efbb3..950c7e737 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -27,16 +27,20 @@
"use strict";
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
var $Set = global.Set;
var $Map = global.Map;
var $WeakMap = global.WeakMap;
-//-------------------------------------------------------------------
-
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
var undefined_sentinel = {};
+// -------------------------------------------------------------------
+// Harmony Set
function SetConstructor() {
if (%_IsConstructCall()) {
@@ -107,6 +111,31 @@ function SetClear() {
}
+// -------------------------------------------------------------------
+
+function SetUpSet() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Set, SetConstructor);
+ %FunctionSetPrototype($Set, new $Object());
+ %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the Set prototype object.
+ InstallGetter($Set.prototype, "size", SetGetSize);
+ InstallFunctions($Set.prototype, DONT_ENUM, $Array(
+ "add", SetAdd,
+ "has", SetHas,
+ "delete", SetDelete,
+ "clear", SetClear
+ ));
+}
+
+SetUpSet();
+
+
+// -------------------------------------------------------------------
+// Harmony Map
+
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
@@ -183,6 +212,32 @@ function MapClear() {
}
+// -------------------------------------------------------------------
+
+function SetUpMap() {
+ %CheckIsBootstrapping();
+
+ %SetCode($Map, MapConstructor);
+ %FunctionSetPrototype($Map, new $Object());
+ %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+
+ // Set up the non-enumerable functions on the Map prototype object.
+ InstallGetter($Map.prototype, "size", MapGetSize);
+ InstallFunctions($Map.prototype, DONT_ENUM, $Array(
+ "get", MapGet,
+ "set", MapSet,
+ "has", MapHas,
+ "delete", MapDelete,
+ "clear", MapClear
+ ));
+}
+
+SetUpMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
@@ -239,42 +294,14 @@ function WeakMapDelete(key) {
return %WeakMapDelete(this, key);
}
+
// -------------------------------------------------------------------
-(function () {
+function SetUpWeakMap() {
%CheckIsBootstrapping();
- // Set up the Set and Map constructor function.
- %SetCode($Set, SetConstructor);
- %SetCode($Map, MapConstructor);
-
- // Set up the constructor property on the Set and Map prototype object.
- %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
- %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
-
- // Set up the non-enumerable functions on the Set prototype object.
- InstallGetter($Set.prototype, "size", SetGetSize);
- InstallFunctions($Set.prototype, DONT_ENUM, $Array(
- "add", SetAdd,
- "has", SetHas,
- "delete", SetDelete,
- "clear", SetClear
- ));
-
- // Set up the non-enumerable functions on the Map prototype object.
- InstallGetter($Map.prototype, "size", MapGetSize);
- InstallFunctions($Map.prototype, DONT_ENUM, $Array(
- "get", MapGet,
- "set", MapSet,
- "has", MapHas,
- "delete", MapDelete,
- "clear", MapClear
- ));
-
- // Set up the WeakMap constructor function.
%SetCode($WeakMap, WeakMapConstructor);
-
- // Set up the constructor property on the WeakMap prototype object.
+ %FunctionSetPrototype($WeakMap, new $Object());
%SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
// Set up the non-enumerable functions on the WeakMap prototype object.
@@ -284,4 +311,6 @@ function WeakMapDelete(key) {
"has", WeakMapHas,
"delete", WeakMapDelete
));
-})();
+}
+
+SetUpWeakMap();
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 6f9b901d9..184429b41 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -520,14 +520,15 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
- ParsingFlags flags = kNoParsingFlags;
- if ((info->pre_parse_data() != NULL ||
- String::cast(script->source())->length() > FLAG_min_preparse_length) &&
- !DebuggerWantsEagerCompilation(info)) {
- flags = kAllowLazy;
- }
- if (!ParserApi::Parse(info, flags)) {
- return Handle<SharedFunctionInfo>::null();
+ {
+ Parser parser(info);
+ if ((info->pre_parse_data() != NULL ||
+ String::cast(script->source())->length() > FLAG_min_preparse_length) &&
+ !DebuggerWantsEagerCompilation(info))
+ parser.set_allow_lazy(true);
+ if (!parser.Parse()) {
+ return Handle<SharedFunctionInfo>::null();
+ }
}
// Measure how long it takes to do the compilation; only take the
@@ -864,7 +865,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
if (InstallCodeFromOptimizedCodeMap(info)) return true;
// Generate the AST for the lazily compiled function.
- if (ParserApi::Parse(info, kNoParsingFlags)) {
+ if (Parser::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
@@ -932,7 +933,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
return;
}
- if (ParserApi::Parse(*info, kNoParsingFlags)) {
+ if (Parser::Parse(*info)) {
LanguageMode language_mode = info->function()->language_mode();
info->SetLanguageMode(language_mode);
shared->set_language_mode(language_mode);
@@ -957,18 +958,18 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
- if (shared->code()->stack_check_patched_for_osr()) {
+ if (shared->code()->back_edges_patched_for_osr()) {
// At this point we either put the function on recompilation queue or
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
InterruptStub interrupt_stub;
- Handle<Code> check_code = interrupt_stub.GetCode(isolate);
+ Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code =
isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertStackCheckCode(shared->code(),
- *check_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(shared->code(),
+ *interrupt_code,
+ *replacement_code);
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index a0ba2f7eb..abeb8121c 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -165,6 +165,11 @@ enum BindingFlags {
V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
+ V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
+ V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
+ strict_mode_generator_function_map) \
+ V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
+ generator_object_prototype_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -295,6 +300,9 @@ class Context: public FixedArray {
PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
+ GENERATOR_FUNCTION_MAP_INDEX,
+ STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
+ GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@@ -439,6 +447,16 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
+ static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
+ return is_generator
+ ? (language_mode == CLASSIC_MODE
+ ? GENERATOR_FUNCTION_MAP_INDEX
+ : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
+ : (language_mode == CLASSIC_MODE
+ ? FUNCTION_MAP_INDEX
+ : STRICT_MODE_FUNCTION_MAP_INDEX);
+ }
+
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.
diff --git a/deps/v8/src/conversions-inl.h b/deps/v8/src/conversions-inl.h
index 7edaf2240..eb718d684 100644
--- a/deps/v8/src/conversions-inl.h
+++ b/deps/v8/src/conversions-inl.h
@@ -77,7 +77,7 @@ inline unsigned int FastD2UI(double x) {
uint32_t result;
Address mantissa_ptr = reinterpret_cast<Address>(&x);
// Copy least significant 32 bits of mantissa.
- memcpy(&result, mantissa_ptr, sizeof(result));
+ OS::MemCopy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
diff --git a/deps/v8/src/cpu-profiler.cc b/deps/v8/src/cpu-profiler.cc
index 7979eb4d2..47c2a9423 100644
--- a/deps/v8/src/cpu-profiler.cc
+++ b/deps/v8/src/cpu-profiler.cc
@@ -445,7 +445,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
is_profiling_ = true;
- processor_->Start();
+ processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
if (isolate_->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@@ -459,11 +459,11 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
// Enable stack sampling.
Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_);
+ sampler->IncreaseProfilingDepth();
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
- sampler->IncreaseProfilingDepth();
}
}
diff --git a/deps/v8/src/cpu-profiler.h b/deps/v8/src/cpu-profiler.h
index 89d9c81c1..6e2b0e09c 100644
--- a/deps/v8/src/cpu-profiler.h
+++ b/deps/v8/src/cpu-profiler.h
@@ -31,6 +31,7 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
+#include "sampler.h"
#include "unbound-queue.h"
namespace v8 {
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index b57e3907c..8f6e384c1 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -45,6 +45,10 @@
#include "../include/v8-testing.h"
#endif // V8_SHARED
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+#include "third_party/vtune/v8-vtune.h"
+#endif
+
#include "d8.h"
#ifndef V8_SHARED
@@ -144,6 +148,11 @@ class DumbLineEditor: public LineEditor {
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
+#if defined(__native_client__)
+ // Native Client libc is used to being embedded in Chrome and
+ // has trouble recognizing when to flush.
+ fflush(stdout);
+#endif
return Shell::ReadFromStdin(isolate_);
}
@@ -1921,6 +1930,9 @@ int Shell::Main(int argc, char* argv[]) {
DumbLineEditor dumb_line_editor(isolate);
{
Initialize(isolate);
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+ vTune::InitilizeVtuneForV8();
+#endif
Symbols symbols(isolate);
InitializeDebugger(isolate);
diff --git a/deps/v8/src/d8.gyp b/deps/v8/src/d8.gyp
index cce8f2a1f..ea043dcf4 100644
--- a/deps/v8/src/d8.gyp
+++ b/deps/v8/src/d8.gyp
@@ -29,6 +29,8 @@
'includes': ['../build/common.gypi'],
'variables': {
'console%': '',
+ # Enable support for Intel VTune. Supported on ia32/x64 only
+ 'v8_enable_vtunejit%': 0,
},
'targets': [
{
@@ -70,6 +72,11 @@
}],
],
}],
+ ['v8_enable_vtunejit==1', {
+ 'dependencies': [
+ '../src/third_party/vtune/v8vtune.gyp:v8_vtune',
+ ],
+ }],
],
},
{
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index c75d12c65..62999e9de 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -25,20 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
// var $isFinite = GlobalIsFinite;
+var $Date = global.Date;
+
// -------------------------------------------------------------------
// This file contains date support implemented in JavaScript.
-// Keep reference to original values of some global properties. This
-// has the added benefit that the code in this file is isolated from
-// changes to these properties.
-var $Date = global.Date;
-
// Helper function to throw error.
function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
@@ -142,7 +138,7 @@ var Date_cache = {
};
-%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
+function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
return (new $Date()).toString();
@@ -199,10 +195,7 @@ var Date_cache = {
value = MakeDate(day, time);
SET_LOCAL_DATE_VALUE(this, value);
}
-});
-
-
-%FunctionSetPrototype($Date, new $Date($NaN));
+}
var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
@@ -767,6 +760,10 @@ function ResetDateCache() {
function SetUpDate() {
%CheckIsBootstrapping();
+
+ %SetCode($Date, DateConstructor);
+ %FunctionSetPrototype($Date, new $Date($NaN));
+
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(
"UTC", DateUTC,
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index 91af9ccc1..4af2194ea 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -551,9 +551,9 @@ void Debug::ThreadInit() {
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
to += sizeof(ThreadLocal);
- memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
+ OS::MemCopy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
ThreadInit();
ASSERT(to <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
@@ -562,9 +562,10 @@ char* Debug::ArchiveDebug(char* storage) {
char* Debug::RestoreDebug(char* storage) {
char* from = storage;
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
from += sizeof(ThreadLocal);
- memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
+ OS::MemCopy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
ASSERT(from <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
}
@@ -874,8 +875,9 @@ bool Debug::Load() {
// Check for caught exceptions.
if (caught_exception) return false;
- // Debugger loaded.
- debug_context_ = context;
+ // Debugger loaded, create debugger context global handle.
+ debug_context_ = Handle<Context>::cast(
+ isolate_->global_handles()->Create(*context));
return true;
}
@@ -891,7 +893,7 @@ void Debug::Unload() {
DestroyScriptCache();
// Clear debugger context global handle.
- Isolate::Current()->global_handles()->Destroy(
+ isolate_->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 601faf723..1af736575 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -1204,6 +1204,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
+ ASSERT(descriptor->register_param_count_ >= 0);
int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
sizeof(Arguments) + kPointerSize;
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
@@ -2029,52 +2030,96 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
-void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and patch every stack check
+void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ // Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- ASSERT(!unoptimized_code->stack_check_patched_for_osr());
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
+ int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
+ Address back_edge_cursor = unoptimized_code->instruction_start() +
+ unoptimized_code->back_edge_table_offset();
+ uint32_t table_length = Memory::uint32_at(back_edge_cursor);
+ back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
- Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- PatchStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
+ uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+ if (loop_depth == loop_nesting_level) {
+ // Loop back edge has the loop depth that we want to patch.
+ uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
+ Address pc_after = unoptimized_code->instruction_start() + pc_offset;
+ PatchInterruptCodeAt(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code);
+ }
+ back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
- unoptimized_code->set_stack_check_patched_for_osr(true);
+ unoptimized_code->set_back_edges_patched_for_osr(true);
+#ifdef DEBUG
+ Deoptimizer::VerifyInterruptCode(
+ unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
+#endif // DEBUG
}
-void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code) {
- // Iterate over the stack check table and revert the patched
- // stack check calls.
+void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ // Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
- ASSERT(unoptimized_code->stack_check_patched_for_osr());
- Address stack_check_cursor = unoptimized_code->instruction_start() +
- unoptimized_code->stack_check_table_offset();
- uint32_t table_length = Memory::uint32_at(stack_check_cursor);
- stack_check_cursor += kIntSize;
+ ASSERT(unoptimized_code->back_edges_patched_for_osr());
+ int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
+ Address back_edge_cursor = unoptimized_code->instruction_start() +
+ unoptimized_code->back_edge_table_offset();
+ uint32_t table_length = Memory::uint32_at(back_edge_cursor);
+ back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
- uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
+ uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+ if (loop_depth <= loop_nesting_level) {
+ uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
+ Address pc_after = unoptimized_code->instruction_start() + pc_offset;
+ RevertInterruptCodeAt(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code);
+ }
+ back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
+ }
+ unoptimized_code->set_back_edges_patched_for_osr(false);
+#ifdef DEBUG
+ // Assert that none of the back edges are patched anymore.
+ Deoptimizer::VerifyInterruptCode(
+ unoptimized_code, interrupt_code, replacement_code, -1);
+#endif // DEBUG
+}
+
+
+#ifdef DEBUG
+void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code,
+ int loop_nesting_level) {
+ CHECK(unoptimized_code->kind() == Code::FUNCTION);
+ Address back_edge_cursor = unoptimized_code->instruction_start() +
+ unoptimized_code->back_edge_table_offset();
+ uint32_t table_length = Memory::uint32_at(back_edge_cursor);
+ back_edge_cursor += kIntSize;
+ for (uint32_t i = 0; i < table_length; ++i) {
+ uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+ CHECK_LE(loop_depth, Code::kMaxLoopNestingMarker);
+ // Assert that all back edges for shallower loops (and only those)
+ // have already been patched.
+ uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
- RevertStackCheckCodeAt(unoptimized_code,
- pc_after,
- check_code,
- replacement_code);
- stack_check_cursor += 2 * kIntSize;
+ CHECK_EQ((loop_depth <= loop_nesting_level),
+ InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
- unoptimized_code->set_stack_check_patched_for_osr(false);
}
+#endif // DEBUG
unsigned Deoptimizer::ComputeInputFrameSize() const {
@@ -2327,7 +2372,8 @@ int32_t TranslationIterator::Next() {
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
- memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
+ OS::MemCopy(
+ result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
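
PatchInterruptCode, RevertInterruptCode and VerifyInterruptCode above all walk the same packed back edge table: a uint32 entry count followed by fixed-size entries, with the pc offset read at +kIntSize and the loop depth byte at +2*kIntSize within each entry. A standalone sketch of that walk over a hand-built buffer; the field at offset 0 (taken to be the AST id) and the exact entry size are assumptions inferred from those offsets, not V8's authoritative definition:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const int kIntSize = 4;
  const int kBackEdgeEntrySize = 2 * kIntSize + 1;  // assumed: ast id, pc offset, depth byte

  // Hand-built table: count = 1, then one entry.
  unsigned char table[4 + 2 * 4 + 1];
  uint32_t count = 1, ast_id = 7, pc_offset = 0x40;
  std::memcpy(table, &count, 4);
  std::memcpy(table + 4, &ast_id, 4);
  std::memcpy(table + 8, &pc_offset, 4);
  table[12] = 2;  // loop depth

  unsigned char* cursor = table;
  uint32_t length;
  std::memcpy(&length, cursor, 4);
  cursor += kIntSize;
  for (uint32_t i = 0; i < length; ++i) {
    uint32_t pc;
    std::memcpy(&pc, cursor + kIntSize, 4);     // pc offset, as in the patch loop
    uint8_t loop_depth = cursor[2 * kIntSize];  // depth byte, as in the patch loop
    std::printf("entry %u: pc offset %u, loop depth %u\n", i, pc, loop_depth);
    cursor += kBackEdgeEntrySize;
  }
  return 0;
}
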
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 895ed6690..97b2206b6 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -210,32 +210,45 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
- // Patch all stack guard checks in the unoptimized code to
+ // Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
- static void PatchStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
+ static void PatchInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code);
- // Patch stack guard check at instruction before pc_after in
+ // Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
- static void PatchStackCheckCodeAt(Code* unoptimized_code,
+ static void PatchInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code);
+
+ // Change all patched interrupts patched in the unoptimized code
+ // back to normal interrupts.
+ static void RevertInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code);
+
+ // Change patched interrupt in the unoptimized code
+ // back to a normal interrupt.
+ static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* check_code,
+ Code* interrupt_code,
Code* replacement_code);
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCode(Code* unoptimized_code,
- Code* check_code,
- Code* replacement_code);
-
- // Change all patched stack guard checks in the unoptimized code
- // back to a normal stack guard check.
- static void RevertStackCheckCodeAt(Code* unoptimized_code,
+#ifdef DEBUG
+ static bool InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
- Code* check_code,
+ Code* interrupt_code,
Code* replacement_code);
+ // Verify that all back edges of a certain loop depth are patched.
+ static void VerifyInterruptCode(Code* unoptimized_code,
+ Code* interrupt_code,
+ Code* replacement_code,
+ int loop_nesting_level);
+#endif // DEBUG
+
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);
diff --git a/deps/v8/src/disassembler.cc b/deps/v8/src/disassembler.cc
index f168f84ae..b01b44315 100644
--- a/deps/v8/src/disassembler.cc
+++ b/deps/v8/src/disassembler.cc
@@ -50,8 +50,8 @@ void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
pc - begin,
*pc);
} else {
- fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
- reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
+ PrintF(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
+ reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
}
}
}
@@ -101,13 +101,12 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
if (f == NULL) {
PrintF("%s\n", out->Finalize());
} else {
- fprintf(f, "%s\n", out->Finalize());
+ PrintF(f, "%s\n", out->Finalize());
}
out->Reset();
}
-
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
@@ -337,10 +336,10 @@ void Disassembler::Decode(FILE* f, Code* code) {
code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
- // If there might be a stack check table, stop before reaching it.
+ // If there might be a back edge table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
- Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
+ Min(decode_size, static_cast<int>(code->back_edge_table_offset()));
}
byte* begin = code->instruction_start();
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 9deef6061..7c2b56851 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -183,7 +183,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
- copy_size);
+ static_cast<size_t>(copy_size));
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
@@ -339,7 +339,7 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to_address),
reinterpret_cast<Object**>(from_address),
- words_per_double * copy_size);
+ static_cast<size_t>(words_per_double * copy_size));
}
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index dee311268..025a25619 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
+#include "deoptimizer.h"
#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
@@ -448,6 +449,19 @@ void StackGuard::RequestGC() {
}
+bool StackGuard::IsFullDeopt() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
+}
+
+
+void StackGuard::FullDeopt() {
+ ExecutionAccess access(isolate_);
+ thread_local_.interrupt_flags_ |= FULL_DEOPT;
+ set_interrupt_limits(access);
+}
+
+
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@@ -488,7 +502,7 @@ void StackGuard::Continue(InterruptFlag after_what) {
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
- memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
+ OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
@@ -505,7 +519,8 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
- memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
+ OS::MemCopy(
+ reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
@@ -880,7 +895,6 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
-
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
isolate->runtime_profiler()->OptimizeNow();
@@ -898,6 +912,10 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(INTERRUPT);
return isolate->StackOverflow();
}
+ if (stack_guard->IsFullDeopt()) {
+ stack_guard->Continue(FULL_DEOPT);
+ Deoptimizer::DeoptimizeAll(isolate);
+ }
return isolate->heap()->undefined_value();
}
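
The new FullDeopt/IsFullDeopt pair follows the same request, poll and clear pattern as the existing interrupt bits (the FULL_DEOPT bit itself is added to InterruptFlag in execution.h below). A minimal single-threaded sketch of that pattern; the real StackGuard guards interrupt_flags_ with an ExecutionAccess lock and also adjusts the stack limits, which this omits:

#include <cstdio>

enum InterruptFlag {
  INTERRUPT  = 1 << 0,
  GC_REQUEST = 1 << 5,
  FULL_DEOPT = 1 << 6
};

class StackGuardSketch {
 public:
  StackGuardSketch() : interrupt_flags_(0) {}
  void FullDeopt() { interrupt_flags_ |= FULL_DEOPT; }                       // request
  bool IsFullDeopt() const { return (interrupt_flags_ & FULL_DEOPT) != 0; }  // poll
  void Continue(InterruptFlag flag) { interrupt_flags_ &= ~flag; }           // clear
 private:
  int interrupt_flags_;
};

int main() {
  StackGuardSketch guard;
  guard.FullDeopt();
  if (guard.IsFullDeopt()) {
    guard.Continue(FULL_DEOPT);
    std::printf("would call Deoptimizer::DeoptimizeAll(isolate) here\n");
  }
  return 0;
}
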
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index b104180c9..9cf8ac649 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -41,7 +41,8 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- GC_REQUEST = 1 << 5
+ GC_REQUEST = 1 << 5,
+ FULL_DEOPT = 1 << 6
};
@@ -197,6 +198,8 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
+ bool IsFullDeopt();
+ void FullDeopt();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index fece9a09c..5e2a2b187 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -578,15 +578,22 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
}
+static Handle<Map> MapForNewFunction(Isolate *isolate,
+ Handle<SharedFunctionInfo> function_info) {
+ Context *context = isolate->context()->native_context();
+ int map_index = Context::FunctionMapIndex(function_info->language_mode(),
+ function_info->is_generator());
+ return Handle<Map>(Map::cast(context->get(map_index)));
+}
+
+
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
function_info,
- function_info->is_classic_mode()
- ? isolate()->function_map()
- : isolate()->strict_mode_function_map(),
+ MapForNewFunction(isolate(), function_info),
pretenure);
if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
@@ -874,14 +881,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
initial_map->set_constructor(*function);
}
- // Set function.prototype and give the prototype a constructor
- // property that refers to the function.
SetPrototypeProperty(function, prototype);
- // Currently safe because it is only invoked from Genesis.
- CHECK_NOT_EMPTY_HANDLE(isolate(),
- JSObject::SetLocalPropertyIgnoreAttributes(
- prototype, constructor_string(),
- function, DONT_ENUM));
return function;
}
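
MapForNewFunction now delegates the classic/strict and plain/generator choice to Context::FunctionMapIndex (added in contexts.h above), so generator functions get the maps installed by the bootstrapper rather than the ordinary function maps. The selection in one standalone snippet; the index names mirror the native-context slots, but the numeric values here are placeholders:

#include <cstdio>

enum LanguageMode { CLASSIC_MODE, STRICT_MODE };
enum MapIndex {
  FUNCTION_MAP_INDEX,
  STRICT_MODE_FUNCTION_MAP_INDEX,
  GENERATOR_FUNCTION_MAP_INDEX,
  STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX
};

static MapIndex FunctionMapIndex(LanguageMode mode, bool is_generator) {
  if (is_generator) {
    return mode == CLASSIC_MODE ? GENERATOR_FUNCTION_MAP_INDEX
                                : STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX;
  }
  return mode == CLASSIC_MODE ? FUNCTION_MAP_INDEX
                              : STRICT_MODE_FUNCTION_MAP_INDEX;
}

int main() {
  std::printf("classic function -> %d\n", FunctionMapIndex(CLASSIC_MODE, false));
  std::printf("strict generator -> %d\n", FunctionMapIndex(STRICT_MODE, true));
  return 0;
}
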
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index ea72168b7..9c9362b2f 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -119,6 +119,22 @@ public:
};
#endif
+#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST)
+# define ENABLE_VFP3_DEFAULT true
+#else
+# define ENABLE_VFP3_DEFAULT false
+#endif
+#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST)
+# define ENABLE_ARMV7_DEFAULT true
+#else
+# define ENABLE_ARMV7_DEFAULT false
+#endif
+#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST)
+# define ENABLE_32DREGS_DEFAULT true
+#else
+# define ENABLE_32DREGS_DEFAULT false
+#endif
+
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
@@ -168,12 +184,12 @@ DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
"generate array elements transition stubs")
-DEFINE_bool(compiled_keyed_stores, false, "use optimizing compiler to "
+DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
"generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
-DEFINE_bool(pretenure_literals, false, "allocate literals in old space")
+DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@@ -308,12 +324,9 @@ DEFINE_bool(enable_rdtsc, true,
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, true,
- "enable use of VFP3 instructions if available - this implies "
- "enabling ARMv7 and VFP2 instructions (ARM only)")
-DEFINE_bool(enable_vfp2, true,
- "enable use of VFP2 instructions if available")
-DEFINE_bool(enable_armv7, true,
+DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
+ "enable use of VFP3 instructions if available")
+DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
@@ -322,10 +335,8 @@ DEFINE_bool(enable_movw_movt, false,
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
-DEFINE_bool(enable_32dregs, true,
+DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
-DEFINE_bool(enable_fpu, true,
- "enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
@@ -502,6 +513,8 @@ DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
// isolate.cc
+DEFINE_bool(abort_on_uncaught_exception, false,
+ "abort program (dump core) when an uncaught exception is thrown")
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,
diff --git a/deps/v8/src/flags.cc b/deps/v8/src/flags.cc
index ff725adcf..282bf20ac 100644
--- a/deps/v8/src/flags.cc
+++ b/deps/v8/src/flags.cc
@@ -34,6 +34,9 @@
#include "smart-pointers.h"
#include "string-stream.h"
+#ifdef V8_TARGET_ARCH_ARM
+#include "arm/assembler-arm-inl.h"
+#endif
namespace v8 {
namespace internal {
@@ -305,7 +308,7 @@ static void SplitArgument(const char* arg,
// make a copy so we can NUL-terminate flag name
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
- memcpy(buffer, *name, n);
+ OS::MemCopy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
// get the value
@@ -367,8 +370,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// sense there.
continue;
} else {
- fprintf(stderr, "Error: unrecognized flag %s\n"
- "Try --help for options\n", arg);
+ PrintF(stderr, "Error: unrecognized flag %s\n"
+ "Try --help for options\n", arg);
return_code = j;
break;
}
@@ -381,9 +384,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
if (i < *argc) {
value = argv[i++];
} else {
- fprintf(stderr, "Error: missing value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
+ PrintF(stderr, "Error: missing value for flag %s of type %s\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()));
return_code = j;
break;
}
@@ -424,9 +427,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
(flag->type() != Flag::TYPE_BOOL && is_bool) ||
*endp != '\0') {
- fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
- "Try --help for options\n",
- arg, Type2String(flag->type()));
+ PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
+ "Try --help for options\n",
+ arg, Type2String(flag->type()));
return_code = j;
break;
}
@@ -475,7 +478,7 @@ static char* SkipBlackSpace(char* p) {
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
- memcpy(copy0.start(), str, len);
+ OS::MemCopy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
@@ -517,6 +520,12 @@ void FlagList::ResetAllFlags() {
// static
void FlagList::PrintHelp() {
+#ifdef V8_TARGET_ARCH_ARM
+ CpuFeatures::PrintTarget();
+ CpuFeatures::Probe();
+ CpuFeatures::PrintFeatures();
+#endif // V8_TARGET_ARCH_ARM
+
printf("Usage:\n");
printf(" shell [options] -e string\n");
printf(" execute string in V8\n");
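Note: flags.cc now routes raw memcpy/fprintf calls through the OS::MemCopy and PrintF wrappers and, on ARM targets, probes CPU features before printing --help. For reference, a stripped-down sketch of the kind of --name=value splitting SplitArgument() performs; "--no-" negation and bare "--" handling are omitted, and plain memcpy stands in for the platform wrapper.

// Sketch: copy the flag name into a caller-provided buffer so it can be
// NUL-terminated, and point value at the text after '='.
#include <cassert>
#include <cstdio>
#include <cstring>

static void SplitFlag(const char* arg, char* buffer, size_t buffer_size,
                      const char** name, const char** value) {
  *name = NULL;
  *value = NULL;
  if (arg[0] != '-') return;
  arg++;                      // skip leading '-'
  if (arg[0] == '-') arg++;   // accept "--flag" as well as "-flag"
  *name = arg;
  const char* eq = strchr(arg, '=');
  if (eq == NULL) return;     // boolean flag, no value part
  size_t n = static_cast<size_t>(eq - arg);
  assert(n < buffer_size);    // buffer is too small
  memcpy(buffer, arg, n);     // make a copy so we can NUL-terminate the name
  buffer[n] = '\0';
  *name = buffer;
  *value = eq + 1;
}

int main() {
  char buffer[64];
  const char* name;
  const char* value;
  SplitFlag("--max_inlined_source_size=600", buffer, sizeof(buffer),
            &name, &value);
  printf("name=%s value=%s\n", name, value);
}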
diff --git a/deps/v8/src/frames.h b/deps/v8/src/frames.h
index ca0d5bec9..11e8d2878 100644
--- a/deps/v8/src/frames.h
+++ b/deps/v8/src/frames.h
@@ -84,6 +84,18 @@ class InnerPointerToCodeCache {
};
+class StackHandlerConstants : public AllStatic {
+ public:
+ static const int kNextOffset = 0 * kPointerSize;
+ static const int kCodeOffset = 1 * kPointerSize;
+ static const int kStateOffset = 2 * kPointerSize;
+ static const int kContextOffset = 3 * kPointerSize;
+ static const int kFPOffset = 4 * kPointerSize;
+
+ static const int kSize = kFPOffset + kPointerSize;
+};
+
+
class StackHandler BASE_EMBEDDED {
public:
enum Kind {
@@ -581,7 +593,6 @@ class JavaScriptFrame: public StandardFrame {
inline Object* function_slot_object() const;
friend class StackFrameIterator;
- friend class StackTracer;
};
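Note: the StackHandlerConstants added above describe a five-slot stack handler record (next, code, state, context, frame pointer). A standalone sketch of the same layout arithmetic, with kPointerSize approximated by sizeof(void*) on the host rather than the V8 target constant:

#include <cstdio>

static const int kPointerSize = sizeof(void*);

static const int kNextOffset    = 0 * kPointerSize;
static const int kCodeOffset    = 1 * kPointerSize;
static const int kStateOffset   = 2 * kPointerSize;
static const int kContextOffset = 3 * kPointerSize;
static const int kFPOffset      = 4 * kPointerSize;
static const int kSize          = kFPOffset + kPointerSize;

int main() {
  std::printf("stack handler record: %d bytes (%d slots of %d bytes)\n",
              kSize, kSize / kPointerSize, kPointerSize);
}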
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 1c6a0b91b..72d083584 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -322,7 +322,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
ASSERT(!isolate->has_pending_exception());
return false;
}
- unsigned table_offset = cgen.EmitStackCheckTable();
+ unsigned table_offset = cgen.EmitBackEdgeTable();
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
@@ -341,8 +341,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
- code->set_stack_check_table_offset(table_offset);
- code->set_stack_check_patched_for_osr(false);
+ code->set_back_edge_table_offset(table_offset);
+ code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
@@ -362,17 +362,18 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
-unsigned FullCodeGenerator::EmitStackCheckTable() {
- // The stack check table consists of a length (in number of entries)
+unsigned FullCodeGenerator::EmitBackEdgeTable() {
+ // The back edge table consists of a length (in number of entries)
// field, and then a sequence of entries. Each entry is a pair of AST id
// and code-relative pc offset.
masm()->Align(kIntSize);
unsigned offset = masm()->pc_offset();
- unsigned length = stack_checks_.length();
+ unsigned length = back_edges_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
- __ dd(stack_checks_[i].id.ToInt());
- __ dd(stack_checks_[i].pc_and_state);
+ __ dd(back_edges_[i].id.ToInt());
+ __ dd(back_edges_[i].pc);
+ __ db(back_edges_[i].loop_depth);
}
return offset;
}
@@ -478,8 +479,11 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
- BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
- stack_checks_.Add(entry, zone());
+ ASSERT(loop_depth() > 0);
+ uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
+ BackEdgeEntry entry =
+ { ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
+ back_edges_.Add(entry, zone());
}
@@ -1251,7 +1255,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
- Label body, stack_check;
+ Label body, book_keeping;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@@ -1265,13 +1269,13 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
SetExpressionPosition(stmt->cond(), stmt->condition_position());
VisitForControl(stmt->cond(),
- &stack_check,
+ &book_keeping,
loop_statement.break_label(),
- &stack_check);
+ &book_keeping);
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
- __ bind(&stack_check);
+ __ bind(&book_keeping);
EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
@@ -1549,6 +1553,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
UNIMPLEMENTED();
Comment cmnt(masm_, "[ Yield");
+ // TODO(wingo): Actually update the iterator state.
+ VisitForEffect(expr->generator_object());
VisitForAccumulatorValue(expr->expression());
// TODO(wingo): Assert that the operand stack depth is 0, at least while
// general yield expressions are unimplemented.
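Note: EmitBackEdgeTable() now records a loop depth byte per entry in addition to the AST id and pc offset, matching kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize in full-codegen.h. A minimal sketch of serializing that layout; a little-endian host is assumed purely for illustration, and the real emitter goes through the assembler's dd/db directives rather than memcpy.

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

struct BackEdgeEntry {
  int32_t ast_id;
  uint32_t pc_offset;
  uint8_t loop_depth;
};

static void WriteTable(const std::vector<BackEdgeEntry>& entries,
                       std::vector<uint8_t>* out) {
  uint32_t length = static_cast<uint32_t>(entries.size());
  out->resize(sizeof(uint32_t) + entries.size() * 9);  // 9 bytes per entry
  uint8_t* p = out->data();
  memcpy(p, &length, 4); p += 4;                       // entry count
  for (const BackEdgeEntry& e : entries) {
    memcpy(p, &e.ast_id, 4);    p += 4;                // AST id
    memcpy(p, &e.pc_offset, 4); p += 4;                // code-relative pc
    *p++ = e.loop_depth;                               // clamped loop depth
  }
}

int main() {
  std::vector<BackEdgeEntry> entries = {{7, 0x40, 1}, {12, 0x9c, 2}};
  std::vector<uint8_t> table;
  WriteTable(entries, &table);
  std::printf("table is %zu bytes for %zu entries\n",
              table.size(), entries.size());
}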
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index c4f3fcc72..b9647c2ea 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -92,7 +92,7 @@ class FullCodeGenerator: public AstVisitor {
bailout_entries_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
- stack_checks_(2, info->zone()), // There's always at least one.
+ back_edges_(2, info->zone()),
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
@@ -135,6 +135,7 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
+ static const int kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize;
private:
class Breakable;
@@ -459,9 +460,9 @@ class FullCodeGenerator: public AstVisitor {
Label* back_edge_target);
// Record the OSR AST id corresponding to a back edge in the code.
void RecordBackEdge(BailoutId osr_ast_id);
- // Emit a table of stack check ids and pcs into the code stream. Return
- // the offset of the start of the table.
- unsigned EmitStackCheckTable();
+ // Emit a table of back edge ids, pcs and loop depths into the code stream.
+ // Return the offset of the start of the table.
+ unsigned EmitBackEdgeTable();
void EmitProfilingCounterDecrement(int delta);
void EmitProfilingCounterReset();
@@ -624,6 +625,12 @@ class FullCodeGenerator: public AstVisitor {
unsigned pc_and_state;
};
+ struct BackEdgeEntry {
+ BailoutId id;
+ unsigned pc;
+ uint8_t loop_depth;
+ };
+
struct TypeFeedbackCellEntry {
TypeFeedbackId ast_id;
Handle<JSGlobalPropertyCell> cell;
@@ -818,9 +825,7 @@ class FullCodeGenerator: public AstVisitor {
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
- // TODO(svenpanne) Rename this to something like back_edges_ and rename
- // related functions accordingly.
- ZoneList<BailoutEntry> stack_checks_;
+ ZoneList<BackEdgeEntry> back_edges_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;
diff --git a/deps/v8/src/gdb-jit.cc b/deps/v8/src/gdb-jit.cc
index cf8ca2d4d..d08f2fe41 100644
--- a/deps/v8/src/gdb-jit.cc
+++ b/deps/v8/src/gdb-jit.cc
@@ -629,7 +629,7 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
- ELF(Zone* zone) : sections_(6, zone) {
+ explicit ELF(Zone* zone) : sections_(6, zone) {
sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
}
@@ -681,7 +681,7 @@ class ELF BASE_EMBEDDED {
#else
#error Unsupported target architecture.
#endif
- memcpy(header->ident, ident, 16);
+ OS::MemCopy(header->ident, ident, 16);
header->type = 1;
#if defined(V8_TARGET_ARCH_IA32)
header->machine = 3;
@@ -1019,9 +1019,9 @@ class CodeDescription BASE_EMBEDDED {
#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
+ Zone* zone,
ELF* elf,
int text_section_index) {
- Zone* zone = desc->info()->zone();
ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
@@ -1213,8 +1213,11 @@ class DebugInfoSection : public DebugSection {
w->WriteSLEB128(StandardFrameConstants::kContextOffset);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
+
+    w->WriteULEB128(0);  // Terminate the subprogram.
}
+ w->WriteULEB128(0); // Terminate the compile unit.
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
@@ -1324,15 +1327,14 @@ class DebugAbbrevSection : public DebugSection {
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int locals = scope->StackLocalCount();
- int total_children =
- params + slots + context_slots + internal_slots + locals + 2;
+ // Total children is params + slots + context_slots + internal_slots +
+ // locals + 2 (__function and __context).
// The extra duplication below seems to be necessary to keep
// gdb from getting upset on OSX.
w->WriteULEB128(current_abbreviation++); // Abbreviation code.
w->WriteULEB128(DW_TAG_SUBPROGRAM);
- w->Write<uint8_t>(
- total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
+ w->Write<uint8_t>(DW_CHILDREN_YES);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
@@ -1384,9 +1386,7 @@ class DebugAbbrevSection : public DebugSection {
// The context.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
- if (total_children != 0) {
- w->WriteULEB128(0); // Terminate the sibling list.
- }
+ w->WriteULEB128(0); // Terminate the sibling list.
}
w->WriteULEB128(0); // Terminate the table.
@@ -1789,8 +1789,9 @@ bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
#endif // V8_TARGET_ARCH_X64
-static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
- Zone* zone = desc->info()->zone();
+static void CreateDWARFSections(CodeDescription* desc,
+ Zone* zone,
+ DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
obj->AddSection(new(zone) DebugInfoSection(desc), zone);
obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
@@ -1841,7 +1842,7 @@ extern "C" {
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(MaybeObject* object) {
object->Print();
- fprintf(stdout, "\n");
+ PrintF(stdout, "\n");
}
#endif
}
@@ -1854,7 +1855,7 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
- memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
+ OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
entry->prev_ = entry->next_ = NULL;
@@ -1915,8 +1916,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
}
-static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
- Zone* zone = desc->info()->zone();
+static JITCodeEntry* CreateELFObject(CodeDescription* desc, Zone* zone) {
ZoneScope zone_scope(zone, DELETE_ON_EXIT);
#ifdef __MACH_O
MachO mach_o;
@@ -1944,9 +1944,9 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
zone);
- CreateSymbolsTable(desc, &elf, text_section_index);
+ CreateSymbolsTable(desc, zone, &elf, text_section_index);
- CreateDWARFSections(desc, &elf);
+ CreateDWARFSections(desc, zone, &elf);
elf.Write(&w);
#endif
@@ -2083,7 +2083,8 @@ void GDBJITInterface::AddCode(const char* name,
}
AddUnwindInfo(&code_desc);
- JITCodeEntry* entry = CreateELFObject(&code_desc);
+ Zone* zone = code->GetIsolate()->runtime_zone();
+ JITCodeEntry* entry = CreateELFObject(&code_desc, zone);
ASSERT(!IsLineInfoTagged(entry));
delete lineinfo;
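Note: the DWARF writer now terminates the subprogram DIE and the compile unit with WriteULEB128(0). As background, a minimal ULEB128 encoder in the standard DWARF variable-length format; this is generic DWARF encoding, not the V8 Writer API.

#include <cstdint>
#include <cstdio>
#include <vector>

static void WriteULEB128(uint64_t value, std::vector<uint8_t>* out) {
  do {
    uint8_t byte = value & 0x7f;
    value >>= 7;
    if (value != 0) byte |= 0x80;  // more bytes follow
    out->push_back(byte);
  } while (value != 0);
}

int main() {
  std::vector<uint8_t> buf;
  WriteULEB128(0, &buf);       // single 0x00 byte: the list terminator
  WriteULEB128(624485, &buf);  // DWARF spec example: 0xe5 0x8e 0x26
  for (uint8_t b : buf) std::printf("%02x ", b);
  std::printf("\n");
}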
diff --git a/deps/v8/src/generator.js b/deps/v8/src/generator.js
new file mode 100644
index 000000000..d579928cb
--- /dev/null
+++ b/deps/v8/src/generator.js
@@ -0,0 +1,74 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"use strict";
+
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Function = global.Function;
+
+// ----------------------------------------------------------------------------
+
+
+// TODO(wingo): Give link to specification. For now, the following diagram is
+// the spec:
+// http://wiki.ecmascript.org/lib/exe/fetch.php?cache=cache&media=harmony:es6_generator_object_model_3-29-13.png
+
+function GeneratorObjectNext() {
+ // TODO(wingo): Implement.
+}
+
+function GeneratorObjectSend(value) {
+ // TODO(wingo): Implement.
+}
+
+function GeneratorObjectThrow(exn) {
+ // TODO(wingo): Implement.
+}
+
+function GeneratorObjectClose() {
+ // TODO(wingo): Implement.
+}
+
+function SetUpGenerators() {
+ %CheckIsBootstrapping();
+ var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
+ InstallFunctions(GeneratorObjectPrototype,
+ DONT_ENUM | DONT_DELETE | READ_ONLY,
+ ["next", GeneratorObjectNext,
+ "send", GeneratorObjectSend,
+ "throw", GeneratorObjectThrow,
+ "close", GeneratorObjectClose]);
+ %SetProperty(GeneratorObjectPrototype, "constructor",
+ GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
+ %SetProperty(GeneratorFunctionPrototype, "constructor",
+ GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
+ %SetPrototype(GeneratorFunction, $Function);
+}
+
+SetUpGenerators();
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 990014467..90707b0bc 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -31,10 +31,15 @@
#include "../include/v8-profiler.h"
#include "list.h"
+#include "v8utils.h"
namespace v8 {
namespace internal {
+class GCTracer;
+class HeapStats;
+class ObjectVisitor;
+
// Structure for tracking global handles.
// A single list keeps all the allocated global handles.
// Destroyed handles stay in the list but are added to the free list.
@@ -88,7 +93,7 @@ class ImplicitRefGroup {
malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
group->parent_ = parent;
group->length_ = length;
- CopyWords(group->children_, children, static_cast<int>(length));
+ CopyWords(group->children_, children, length);
return group;
}
diff --git a/deps/v8/src/globals.h b/deps/v8/src/globals.h
index 1606996d2..59931bf5d 100644
--- a/deps/v8/src/globals.h
+++ b/deps/v8/src/globals.h
@@ -67,9 +67,21 @@ namespace internal {
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
+#if defined(__native_client__)
+// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
+// generates ARM machine code, together with a portable ARM simulator
+// compiled for the host architecture in question.
+//
+// Since Native Client is ILP-32 on all architectures, we use
+// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#else
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
+#endif // __native_client__
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
diff --git a/deps/v8/src/handles-inl.h b/deps/v8/src/handles-inl.h
index b763c86a7..bb113110c 100644
--- a/deps/v8/src/handles-inl.h
+++ b/deps/v8/src/handles-inl.h
@@ -59,7 +59,6 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
if (FLAG_enable_slow_asserts) {
Isolate* isolate = Isolate::Current();
CHECK(isolate->AllowHandleDereference() ||
- Heap::RelocationLock::IsLocked(isolate->heap()) ||
!isolate->optimizing_compiler_thread()->IsOptimizerThread());
}
#endif // DEBUG
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index b24a4cd1d..059ff2486 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -252,15 +252,32 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
}
+Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key) {
+ Isolate* isolate = object->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::DeleteObjectProperty(
+ isolate, object, key, JSReceiver::NORMAL_DELETION),
+ Object);
+}
+
+
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
- Runtime::ForceDeleteObjectProperty(isolate, object, key),
+ Runtime::DeleteObjectProperty(
+ isolate, object, key, JSReceiver::FORCE_DELETION),
Object);
}
+Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key) {
+ Isolate* isolate = obj->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ Runtime::HasObjectProperty(isolate, obj, key), Object);
+}
+
+
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
@@ -308,6 +325,14 @@ Handle<JSObject> Copy(Handle<JSObject> obj) {
}
+Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
+ Isolate* isolate = obj->GetIsolate();
+ CALL_HEAP_FUNCTION(isolate,
+ obj->DeepCopy(isolate),
+ JSObject);
+}
+
+
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
}
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index c69713e44..8e9404cb9 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -223,11 +223,13 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes);
-Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
- Handle<Object> key);
+Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key);
-Handle<Object> GetProperty(Handle<JSReceiver> obj,
- const char* name);
+Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key);
+
+Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key);
+
+Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name);
Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> obj,
@@ -240,6 +242,8 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
Handle<JSObject> Copy(Handle<JSObject> obj);
+Handle<JSObject> DeepCopy(Handle<JSObject> obj);
+
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index a15b8ef48..28e50aa8b 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -159,8 +159,8 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqOneByteString::kHeaderSize,
- str.start(), str.length());
+ OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize,
+ str.start(), str.length());
return answer;
}
@@ -192,8 +192,8 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
- str.start(), str.length() * kUC16Size);
+ OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize,
+ str.start(), str.length() * kUC16Size);
return answer;
}
@@ -345,6 +345,16 @@ bool Heap::InOldPointerSpace(Object* object) {
}
+bool Heap::InOldDataSpace(Address address) {
+ return old_data_space_->Contains(address);
+}
+
+
+bool Heap::InOldDataSpace(Object* object) {
+ return InOldDataSpace(reinterpret_cast<Address>(object));
+}
+
+
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
@@ -417,7 +427,7 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
- byte_size / kPointerSize);
+ static_cast<size_t>(byte_size / kPointerSize));
}
@@ -435,7 +445,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
*dst_slot++ = *src_slot++;
}
} else {
- memmove(dst, src, byte_size);
+ OS::MemMove(dst, src, static_cast<size_t>(byte_size));
}
}
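Note: CopyBlock() copies non-overlapping heap blocks a word at a time through CopyWords(), while MoveBlock() falls back to an OS::MemMove-style copy when source and destination may overlap. A small standalone illustration of that split:

#include <cstdint>
#include <cstdio>
#include <cstring>

static void CopyWords(uintptr_t* dst, const uintptr_t* src, size_t count) {
  for (size_t i = 0; i < count; ++i) dst[i] = src[i];  // assumes no overlap
}

static void MoveBlock(void* dst, const void* src, size_t byte_size) {
  memmove(dst, src, byte_size);  // safe for overlapping regions
}

int main() {
  uintptr_t a[4] = {1, 2, 3, 4};
  uintptr_t b[4] = {0, 0, 0, 0};
  CopyWords(b, a, 4);                               // disjoint copy
  MoveBlock(&a[1], &a[0], 3 * sizeof(uintptr_t));   // overlapping shift
  std::printf("b[3]=%lu a[3]=%lu\n",
              static_cast<unsigned long>(b[3]),
              static_cast<unsigned long>(a[3]));
}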
diff --git a/deps/v8/src/heap-snapshot-generator.cc b/deps/v8/src/heap-snapshot-generator.cc
index fb239aa3c..d22239f38 100644
--- a/deps/v8/src/heap-snapshot-generator.cc
+++ b/deps/v8/src/heap-snapshot-generator.cc
@@ -2319,7 +2319,7 @@ class OutputStreamWriter {
int s_chunk_size = Min(
chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
ASSERT(s_chunk_size > 0);
- memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
+ OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
MaybeWriteChunk();
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index fafcb64d3..453d98564 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -162,8 +162,7 @@ Heap::Heap()
#endif
promotion_queue_(this),
configured_(false),
- chunks_queued_for_free_(NULL),
- relocation_mutex_(NULL) {
+ chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@@ -688,9 +687,9 @@ void Heap::MoveElements(FixedArray* array,
ASSERT(array->map() != HEAP->fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
- memmove(dst_objects,
- array->data_start() + src_index,
- len * kPointerSize);
+ OS::MemMove(dst_objects,
+ array->data_start() + src_index,
+ len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
// TODO(hpayer): check store buffer for entries
@@ -952,6 +951,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
+ // Support for global pre-tenuring uses the high promotion mode as a
+    // heuristic indicator of whether to pretenure or not; we trigger
+ // deoptimization here to take advantage of pre-tenuring as soon as
+ // possible.
+ if (FLAG_pretenure_literals) {
+ isolate_->stack_guard()->FullDeopt();
+ }
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
IsLowSurvivalRate()) {
@@ -963,6 +969,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
+ // Trigger deoptimization here to turn off pre-tenuring as soon as
+ // possible.
+ if (FLAG_pretenure_literals) {
+ isolate_->stack_guard()->FullDeopt();
+ }
}
if (new_space_high_promotion_mode_active_ &&
@@ -1282,8 +1293,6 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
- RelocationLock relocation_lock(this);
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@@ -2836,13 +2845,6 @@ bool Heap::CreateInitialObjects() {
}
hidden_string_ = String::cast(obj);
- // Allocate the foreign for __proto__.
- { MaybeObject* maybe_obj =
- AllocateForeign((Address) &Accessors::ObjectPrototype);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_prototype_accessors(Foreign::cast(obj));
-
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
@@ -3963,30 +3965,36 @@ void Heap::InitializeFunction(JSFunction* function,
MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
- // Allocate the prototype. Make sure to use the object function
- // from the function's context, since the function can be from a
- // different context.
- JSFunction* object_function =
- function->context()->native_context()->object_function();
-
- // Each function prototype gets a copy of the object function map.
- // This avoid unwanted sharing of maps between prototypes of different
- // constructors.
+ // Make sure to use globals from the function's context, since the function
+ // can be from a different context.
+ Context* native_context = function->context()->native_context();
+ bool needs_constructor_property;
Map* new_map;
- ASSERT(object_function->has_initial_map());
- MaybeObject* maybe_map = object_function->initial_map()->Copy();
- if (!maybe_map->To(&new_map)) return maybe_map;
+ if (function->shared()->is_generator()) {
+ // Generator prototypes can share maps since they don't have "constructor"
+ // properties.
+ new_map = native_context->generator_object_prototype_map();
+ needs_constructor_property = false;
+ } else {
+ // Each function prototype gets a fresh map to avoid unwanted sharing of
+ // maps between prototypes of different constructors.
+ JSFunction* object_function = native_context->object_function();
+ ASSERT(object_function->has_initial_map());
+ MaybeObject* maybe_map = object_function->initial_map()->Copy();
+ if (!maybe_map->To(&new_map)) return maybe_map;
+ needs_constructor_property = true;
+ }
Object* prototype;
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
- // When creating the prototype for the function we must set its
- // constructor to the function.
- MaybeObject* maybe_failure =
- JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
- constructor_string(), function, DONT_ENUM);
- if (maybe_failure->IsFailure()) return maybe_failure;
+ if (needs_constructor_property) {
+ MaybeObject* maybe_failure =
+ JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
+ constructor_string(), function, DONT_ENUM);
+ if (maybe_failure->IsFailure()) return maybe_failure;
+ }
return prototype;
}
@@ -4086,10 +4094,20 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// First create a new map with the size and number of in-object properties
// suggested by the function.
- int instance_size = fun->shared()->CalculateInstanceSize();
- int in_object_properties = fun->shared()->CalculateInObjectProperties();
+ InstanceType instance_type;
+ int instance_size;
+ int in_object_properties;
+ if (fun->shared()->is_generator()) {
+ instance_type = JS_GENERATOR_OBJECT_TYPE;
+ instance_size = JSGeneratorObject::kSize;
+ in_object_properties = 0;
+ } else {
+ instance_type = JS_OBJECT_TYPE;
+ instance_size = fun->shared()->CalculateInstanceSize();
+ in_object_properties = fun->shared()->CalculateInObjectProperties();
+ }
Map* map;
- MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
+ MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
@@ -4111,7 +4129,8 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
- if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
+ if (instance_type == JS_OBJECT_TYPE &&
+ fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
// Inline constructor can only handle inobject properties.
@@ -4144,7 +4163,9 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
}
}
- fun->shared()->StartInobjectSlackTracking(map);
+ if (instance_type == JS_OBJECT_TYPE) {
+ fun->shared()->StartInobjectSlackTracking(map);
+ }
return map;
}
@@ -4327,6 +4348,22 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
}
+MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction* function) {
+ ASSERT(function->shared()->is_generator());
+  Map* map;
+ if (function->has_initial_map()) {
+ map = function->initial_map();
+ } else {
+ // Allocate the initial map if absent.
+ MaybeObject* maybe_map = AllocateInitialMap(function);
+ if (!maybe_map->To(&map)) return maybe_map;
+ function->set_initial_map(map);
+ }
+ ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+ return AllocateJSObjectFromMap(map);
+}
+
+
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
@@ -4945,7 +4982,7 @@ static inline void WriteOneByteData(Vector<const char> vector,
int len) {
// Only works for ascii.
ASSERT(vector.length() == len);
- memcpy(chars, vector.start(), len);
+ OS::MemCopy(chars, vector.start(), len);
}
static inline void WriteTwoByteData(Vector<const char> vector,
@@ -6588,11 +6625,6 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
- if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
-#ifdef DEBUG
- relocation_mutex_locked_ = false;
-#endif // DEBUG
-
return true;
}
@@ -6695,8 +6727,6 @@ void Heap::TearDown() {
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
-
- delete relocation_mutex_;
}
@@ -7821,8 +7851,8 @@ void Heap::CheckpointObjectStats() {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
- memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
- memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+ OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+ OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 8992e318e..7b4b70d61 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -150,7 +150,6 @@ namespace internal {
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
- V(Foreign, prototype_accessors, PrototypeAccessors) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
@@ -210,9 +209,11 @@ namespace internal {
V(index_string, "index") \
V(last_index_string, "lastIndex") \
V(object_string, "object") \
+ V(payload_string, "payload") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
+ V(unknown_field_string, "unknownField") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
V(Date_string, "Date") \
@@ -601,6 +602,13 @@ class Heap {
return old_pointer_space_->allocation_limit_address();
}
+ Address* OldDataSpaceAllocationTopAddress() {
+ return old_data_space_->allocation_top_address();
+ }
+ Address* OldDataSpaceAllocationLimitAddress() {
+ return old_data_space_->allocation_limit_address();
+ }
+
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
@@ -617,6 +625,9 @@ class Heap {
JSFunction* constructor,
Handle<Object> allocation_site_info_payload);
+ MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
+ JSFunction* function);
+
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -1329,6 +1340,10 @@ class Heap {
inline bool InOldPointerSpace(Address address);
inline bool InOldPointerSpace(Object* object);
+ // Returns whether the object resides in old data space.
+ inline bool InOldDataSpace(Address address);
+ inline bool InOldDataSpace(Object* object);
+
// Checks whether an address/object in the heap (including auxiliary
// area and unused area).
bool Contains(Address addr);
@@ -1497,6 +1512,12 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
+ // Predicate that governs global pre-tenuring decisions based on observed
+ // promotion rates of previous collections.
+ inline bool ShouldGloballyPretenure() {
+ return new_space_high_promotion_mode_active_;
+ }
+
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
@@ -1825,38 +1846,6 @@ class Heap {
void CheckpointObjectStats();
- // We don't use a ScopedLock here since we want to lock the heap
- // only when FLAG_parallel_recompilation is true.
- class RelocationLock {
- public:
- explicit RelocationLock(Heap* heap) : heap_(heap) {
- if (FLAG_parallel_recompilation) {
- heap_->relocation_mutex_->Lock();
-#ifdef DEBUG
- heap_->relocation_mutex_locked_ = true;
-#endif // DEBUG
- }
- }
-
- ~RelocationLock() {
- if (FLAG_parallel_recompilation) {
-#ifdef DEBUG
- heap_->relocation_mutex_locked_ = false;
-#endif // DEBUG
- heap_->relocation_mutex_->Unlock();
- }
- }
-
-#ifdef DEBUG
- static bool IsLocked(Heap* heap) {
- return heap->relocation_mutex_locked_;
- }
-#endif // DEBUG
-
- private:
- Heap* heap_;
- };
-
private:
Heap();
@@ -2326,11 +2315,6 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
- Mutex* relocation_mutex_;
-#ifdef DEBUG
- bool relocation_mutex_locked_;
-#endif // DEBUG;
-
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index edbffc2e2..60a691265 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -685,7 +685,7 @@ void HValue::Kill() {
HValue* operand = OperandAt(i);
if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
- if (first != NULL && first->value() == this && first->index() == i) {
+ if (first != NULL && first->value()->CheckFlag(kIsDead)) {
operand->use_list_ = first->tail();
}
}
@@ -806,6 +806,9 @@ void HInstruction::PrintTo(StringStream* stream) {
PrintRangeTo(stream);
PrintChangesTo(stream);
PrintTypeTo(stream);
+ if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
+ stream->Add(" [noOSE]");
+ }
}
@@ -1581,10 +1584,10 @@ void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
// for which the map is known.
if (HasNoUses() && dominator->IsStoreNamedField()) {
HStoreNamedField* store = HStoreNamedField::cast(dominator);
- Handle<Map> map = store->transition();
- if (map.is_null() || store->object() != value()) return;
+ UniqueValueId map_unique_id = store->transition_unique_id();
+ if (!map_unique_id.IsInitialized() || store->object() != value()) return;
for (int i = 0; i < map_set()->length(); i++) {
- if (map.is_identical_to(map_set()->at(i))) {
+ if (map_unique_id == map_unique_ids_.at(i)) {
DeleteAndReplaceWith(NULL);
return;
}
@@ -1980,20 +1983,25 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
-void HSimulate::MergeInto(HSimulate* other) {
- for (int i = 0; i < values_.length(); ++i) {
- HValue* value = values_[i];
- if (HasAssignedIndexAt(i)) {
- other->AddAssignedValue(GetAssignedIndexAt(i), value);
- } else {
- if (other->pop_count_ > 0) {
- other->pop_count_--;
+void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
+ while (!list->is_empty()) {
+ HSimulate* from = list->RemoveLast();
+ ZoneList<HValue*>* from_values = &from->values_;
+ for (int i = 0; i < from_values->length(); ++i) {
+ if (from->HasAssignedIndexAt(i)) {
+ AddAssignedValue(from->GetAssignedIndexAt(i),
+ from_values->at(i));
} else {
- other->AddPushedValue(value);
+ if (pop_count_ > 0) {
+ pop_count_--;
+ } else {
+ AddPushedValue(from_values->at(i));
+ }
}
}
+ pop_count_ += from->pop_count_;
+ from->DeleteAndReplaceWith(NULL);
}
- other->pop_count_ += pop_count();
}
@@ -2039,6 +2047,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
+ unique_id_(),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
@@ -2067,11 +2076,13 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
HConstant::HConstant(Handle<Object> handle,
+ UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalize_string,
bool boolean_value)
: handle_(handle),
+ unique_id_(unique_id),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
@@ -2087,7 +2098,9 @@ HConstant::HConstant(Handle<Object> handle,
HConstant::HConstant(int32_t integer_value,
Representation r,
Handle<Object> optional_handle)
- : has_int32_value_(true),
+ : handle_(optional_handle),
+ unique_id_(),
+ has_int32_value_(true),
has_double_value_(true),
is_internalized_string_(false),
boolean_value_(integer_value != 0),
@@ -2100,7 +2113,9 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
Handle<Object> optional_handle)
- : has_int32_value_(IsInteger32(double_value)),
+ : handle_(optional_handle),
+ unique_id_(),
+ has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
boolean_value_(double_value != 0 && !isnan(double_value)),
@@ -2125,8 +2140,12 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_);
if (has_double_value_) return new(zone) HConstant(double_value_, r, handle_);
ASSERT(!handle_.is_null());
- return new(zone) HConstant(
- handle_, r, type_from_value_, is_internalized_string_, boolean_value_);
+ return new(zone) HConstant(handle_,
+ unique_id_,
+ r,
+ type_from_value_,
+ is_internalized_string_,
+ boolean_value_);
}
@@ -2451,6 +2470,8 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
Zone* zone)
: types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
+ types_unique_ids_(0, zone),
+ name_unique_id_(),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
@@ -2517,15 +2538,39 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
}
+void HCheckMaps::FinalizeUniqueValueId() {
+ if (!map_unique_ids_.is_empty()) return;
+ Zone* zone = block()->zone();
+ map_unique_ids_.Initialize(map_set_.length(), zone);
+ for (int i = 0; i < map_set_.length(); i++) {
+ map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone);
+ }
+}
+
+
+void HLoadNamedFieldPolymorphic::FinalizeUniqueValueId() {
+ if (!types_unique_ids_.is_empty()) return;
+ Zone* zone = block()->zone();
+ types_unique_ids_.Initialize(types_.length(), zone);
+ for (int i = 0; i < types_.length(); i++) {
+ types_unique_ids_.Add(UniqueValueId(types_.at(i)), zone);
+ }
+ name_unique_id_ = UniqueValueId(name_);
+}
+
+
bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
+ ASSERT_EQ(types_.length(), types_unique_ids_.length());
HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
- if (types_.length() != other->types()->length()) return false;
- if (!name_.is_identical_to(other->name())) return false;
+ if (name_unique_id_ != other->name_unique_id_) return false;
+ if (types_unique_ids_.length() != other->types_unique_ids_.length()) {
+ return false;
+ }
if (need_generic_ != other->need_generic_) return false;
- for (int i = 0; i < types_.length(); i++) {
+ for (int i = 0; i < types_unique_ids_.length(); i++) {
bool found = false;
- for (int j = 0; j < types_.length(); j++) {
- if (types_.at(j).is_identical_to(other->types()->at(i))) {
+ for (int j = 0; j < types_unique_ids_.length(); j++) {
+ if (types_unique_ids_.at(j) == other->types_unique_ids_.at(i)) {
found = true;
break;
}
@@ -2916,12 +2961,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
-HType HFastLiteral::CalculateInferredType() {
- // TODO(mstarzinger): Be smarter, could also be JSArray here.
- return HType::JSObject();
-}
-
-
HType HArrayLiteral::CalculateInferredType() {
return HType::JSArray();
}
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index ad0368718..6853dfe10 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -111,7 +111,6 @@ class LChunkBuilder;
V(DummyUse) \
V(ElementsKind) \
V(EnterInlined) \
- V(FastLiteral) \
V(FixedArrayBaseLength) \
V(ForceRepresentation) \
V(FunctionLiteral) \
@@ -235,14 +234,6 @@ class LChunkBuilder;
virtual Opcode opcode() const { return HValue::k##type; }
-#ifdef DEBUG
-#define ASSERT_ALLOCATION_DISABLED \
- ASSERT(isolate()->optimizing_compiler_thread()->IsOptimizerThread() || \
- !isolate()->heap()->IsAllocationAllowed())
-#else
-#define ASSERT_ALLOCATION_DISABLED do {} while (0)
-#endif
-
class Range: public ZoneObject {
public:
Range()
@@ -365,6 +356,48 @@ class Representation {
};
+class UniqueValueId {
+ public:
+ UniqueValueId() : raw_address_(NULL) { }
+
+ explicit UniqueValueId(Object* object) {
+ raw_address_ = reinterpret_cast<Address>(object);
+ ASSERT(IsInitialized());
+ }
+
+ explicit UniqueValueId(Handle<Object> handle) {
+ static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
+ if (handle.is_null()) {
+ raw_address_ = kEmptyHandleSentinel;
+ } else {
+ raw_address_ = reinterpret_cast<Address>(*handle);
+ ASSERT_NE(kEmptyHandleSentinel, raw_address_);
+ }
+ ASSERT(IsInitialized());
+ }
+
+ bool IsInitialized() const { return raw_address_ != NULL; }
+
+ bool operator==(const UniqueValueId& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
+ return raw_address_ == other.raw_address_;
+ }
+
+ bool operator!=(const UniqueValueId& other) const {
+ ASSERT(IsInitialized() && other.IsInitialized());
+ return raw_address_ != other.raw_address_;
+ }
+
+ intptr_t Hashcode() const {
+ ASSERT(IsInitialized());
+ return reinterpret_cast<intptr_t>(raw_address_);
+ }
+
+ private:
+ Address raw_address_;
+};
+
+
class HType {
public:
HType() : type_(kUninitialized) { }
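Note: UniqueValueId captures the raw address behind a handle once, while dereferencing is still allowed, so that later equality and hash checks (for example on the parallel recompilation thread) never touch the handle again; FinalizeUniqueValueId() is the capture step. A stripped-down sketch of the idea, with plain pointers standing in for V8 handles:

#include <cstdint>
#include <cstdio>

class UniqueId {
 public:
  UniqueId() : raw_(0) {}
  explicit UniqueId(const void* object)
      : raw_(reinterpret_cast<uintptr_t>(object)) {}
  bool IsInitialized() const { return raw_ != 0; }
  bool operator==(const UniqueId& other) const { return raw_ == other.raw_; }
  intptr_t Hashcode() const { return static_cast<intptr_t>(raw_); }
 private:
  uintptr_t raw_;  // captured up front; never dereferenced afterwards
};

int main() {
  int object = 42;          // stand-in for a heap object behind a handle
  UniqueId a(&object);      // the "finalize" step, done on the main thread
  UniqueId b(&object);
  std::printf("equal=%d hash=%ld\n",
              a == b, static_cast<long>(a.Hashcode()));
}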
@@ -829,6 +862,7 @@ class HValue: public ZoneObject {
// This flag is set to true after the SetupInformativeDefinitions() pass
// has processed this instruction.
kIDefsProcessingDone,
+ kHasNoObservableSideEffects,
kLastFlag = kIDefsProcessingDone
};
@@ -1006,7 +1040,8 @@ class HValue: public ZoneObject {
return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
- return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
+ return !CheckFlag(kHasNoObservableSideEffects) &&
+ gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet DependsOnFlags() const {
@@ -1056,6 +1091,9 @@ class HValue: public ZoneObject {
bool Equals(HValue* other);
virtual intptr_t Hashcode();
+  // Compute unique ids upfront that are safe wrt GC and parallel recompilation.
+ virtual void FinalizeUniqueValueId() { }
+
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
@@ -1830,7 +1868,7 @@ class HSimulate: public HInstruction {
return Representation::None();
}
- void MergeInto(HSimulate* other);
+ void MergeWith(ZoneList<HSimulate*>* list);
bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
DECLARE_CONCRETE_INSTRUCTION(Simulate)
@@ -2642,7 +2680,8 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMaps: public HTemplateInstruction<2> {
public:
HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
- HValue* typecheck = NULL) {
+ HValue* typecheck = NULL)
+ : map_unique_ids_(0, zone) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@@ -2654,7 +2693,8 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnElementsKind);
map_set()->Add(map, zone);
}
- HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
+ HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone)
+ : map_unique_ids_(0, zone) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
@@ -2701,28 +2741,36 @@ class HCheckMaps: public HTemplateInstruction<2> {
HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
+ virtual void FinalizeUniqueValueId();
+
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) {
+ ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
- if (map_set()->length() != b->map_set()->length()) return false;
- for (int i = 0; i < map_set()->length(); i++) {
- if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
+ if (map_unique_ids_.length() != b->map_unique_ids_.length()) {
+ return false;
+ }
+ for (int i = 0; i < map_unique_ids_.length(); i++) {
+ if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) {
+ return false;
+ }
}
return true;
}
private:
SmallMapList map_set_;
+ ZoneList<UniqueValueId> map_unique_ids_;
};
class HCheckFunction: public HUnaryOperation {
public:
HCheckFunction(HValue* value, Handle<JSFunction> function)
- : HUnaryOperation(value), target_(function) {
+ : HUnaryOperation(value), target_(function), target_unique_id_() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
@@ -2738,6 +2786,10 @@ class HCheckFunction: public HUnaryOperation {
virtual void Verify();
#endif
+ virtual void FinalizeUniqueValueId() {
+ target_unique_id_ = UniqueValueId(target_);
+ }
+
Handle<JSFunction> target() const { return target_; }
bool target_in_new_space() const { return target_in_new_space_; }
@@ -2746,11 +2798,12 @@ class HCheckFunction: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) {
HCheckFunction* b = HCheckFunction::cast(other);
- return target_.is_identical_to(b->target());
+ return target_unique_id_ == b->target_unique_id_;
}
private:
Handle<JSFunction> target_;
+ UniqueValueId target_unique_id_;
bool target_in_new_space_;
};
@@ -2855,7 +2908,11 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
public:
HCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder,
- Zone* zone) : prototypes_(2, zone), maps_(2, zone) {
+ Zone* zone)
+ : prototypes_(2, zone),
+ maps_(2, zone),
+ first_prototype_unique_id_(),
+ last_prototype_unique_id_() {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// Keep a list of all objects on the prototype chain up to the holder
@@ -2881,18 +2938,13 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- // Dereferencing to use the object's raw address for hashing is safe.
- HandleDereferenceGuard allow_handle_deref(isolate(),
- HandleDereferenceGuard::ALLOW);
- SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
- !isolate()->optimizing_compiler_thread()->IsOptimizerThread());
- intptr_t hash = 0;
- for (int i = 0; i < prototypes_.length(); i++) {
- hash = 17 * hash + reinterpret_cast<intptr_t>(*prototypes_[i]);
- hash = 17 * hash + reinterpret_cast<intptr_t>(*maps_[i]);
- }
- return hash;
+ return first_prototype_unique_id_.Hashcode() * 17 +
+ last_prototype_unique_id_.Hashcode();
+ }
+
+ virtual void FinalizeUniqueValueId() {
+ first_prototype_unique_id_ = UniqueValueId(prototypes_.first());
+ last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
}
bool CanOmitPrototypeChecks() {
@@ -2905,22 +2957,15 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
-#ifdef DEBUG
- if (prototypes_.length() != b->prototypes()->length()) return false;
- for (int i = 0; i < prototypes_.length(); i++) {
- if (!prototypes_[i].is_identical_to(b->prototypes()->at(i))) return false;
- if (!maps_[i].is_identical_to(b->maps()->at(i))) return false;
- }
- return true;
-#else
- return prototypes_.first().is_identical_to(b->prototypes()->first()) &&
- prototypes_.last().is_identical_to(b->prototypes()->last());
-#endif // DEBUG
+ return first_prototype_unique_id_ == b->first_prototype_unique_id_ &&
+ last_prototype_unique_id_ == b->last_prototype_unique_id_;
}
private:
ZoneList<Handle<JSObject> > prototypes_;
ZoneList<Handle<Map> > maps_;
+ UniqueValueId first_prototype_unique_id_;
+ UniqueValueId last_prototype_unique_id_;
};
@@ -3175,6 +3220,7 @@ class HConstant: public HTemplateInstruction<0> {
Representation r,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
+ UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalized_string,
@@ -3188,35 +3234,36 @@ class HConstant: public HTemplateInstruction<0> {
return handle_;
}
- bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
+ bool IsSpecialDouble() const {
+ return has_double_value_ &&
+ (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
+ FixedDoubleArray::is_the_hole_nan(double_value_) ||
+ isnan(double_value_));
+ }
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
}
if (has_double_value_) {
- if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
- isnan(double_value_)) {
+ if (IsSpecialDouble()) {
return true;
}
return false;
}
ASSERT(!handle_.is_null());
+ HandleDereferenceGuard allow_dereference_for_immovable_check(
+ isolate(), HandleDereferenceGuard::ALLOW);
Heap* heap = isolate()->heap();
- // We should have handled minus_zero_value and nan_value in the
- // has_double_value_ clause above.
- // Dereferencing is safe to compare against immovable singletons.
- HandleDereferenceGuard allow_handle_deref(isolate(),
- HandleDereferenceGuard::ALLOW);
- ASSERT(*handle_ != heap->minus_zero_value());
- ASSERT(*handle_ != heap->nan_value());
- return *handle_ == heap->undefined_value() ||
- *handle_ == heap->null_value() ||
- *handle_ == heap->true_value() ||
- *handle_ == heap->false_value() ||
- *handle_ == heap->the_hole_value() ||
- *handle_ == heap->empty_string();
+ ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value()));
+ ASSERT(unique_id_ != UniqueValueId(heap->nan_value()));
+ return unique_id_ == UniqueValueId(heap->undefined_value()) ||
+ unique_id_ == UniqueValueId(heap->null_value()) ||
+ unique_id_ == UniqueValueId(heap->true_value()) ||
+ unique_id_ == UniqueValueId(heap->false_value()) ||
+ unique_id_ == UniqueValueId(heap->the_hole_value()) ||
+ unique_id_ == UniqueValueId(heap->empty_string());
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -3227,7 +3274,9 @@ class HConstant: public HTemplateInstruction<0> {
return has_int32_value_;
}
- virtual bool EmitAtUses() { return !representation().IsDouble(); }
+ virtual bool EmitAtUses() {
+ return !representation().IsDouble() || IsSpecialDouble();
+ }
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() { return handle()->IsSmi(); }
@@ -3246,6 +3295,16 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
+ bool IsTheHole() const {
+ if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
+ return true;
+ }
+ Heap* heap = isolate()->heap();
+ if (!handle_.is_null() && *handle_ == heap->the_hole_value()) {
+ return true;
+ }
+ return false;
+ }
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
@@ -3274,24 +3333,21 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- intptr_t hash;
-
if (has_int32_value_) {
- hash = static_cast<intptr_t>(int32_value_);
+ return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
- hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+ return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
} else {
ASSERT(!handle_.is_null());
- // Dereferencing to use the object's raw address for hashing is safe.
- HandleDereferenceGuard allow_handle_deref(isolate(),
- HandleDereferenceGuard::ALLOW);
- SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
- !isolate()->optimizing_compiler_thread()->IsOptimizerThread());
- hash = reinterpret_cast<intptr_t>(*handle_);
+ return unique_id_.Hashcode();
}
+ }
- return hash;
+ virtual void FinalizeUniqueValueId() {
+ if (!has_double_value_) {
+ ASSERT(!handle_.is_null());
+ unique_id_ = UniqueValueId(handle_);
+ }
}
#ifdef DEBUG
@@ -3315,7 +3371,7 @@ class HConstant: public HTemplateInstruction<0> {
} else {
ASSERT(!handle_.is_null());
return !other_constant->handle_.is_null() &&
- handle_.is_identical_to(other_constant->handle_);
+ unique_id_ == other_constant->unique_id_;
}
}
@@ -3329,6 +3385,7 @@ class HConstant: public HTemplateInstruction<0> {
// constant is non-numeric, handle_ always points to a valid
// constant HeapObject.
Handle<Object> handle_;
+ UniqueValueId unique_id_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
@@ -4740,7 +4797,7 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
- : cell_(cell), details_(details) {
+ : cell_(cell), details_(details), unique_id_() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnGlobalVars);
@@ -4752,13 +4809,11 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
- ASSERT_ALLOCATION_DISABLED;
- // Dereferencing to use the object's raw address for hashing is safe.
- HandleDereferenceGuard allow_handle_deref(isolate(),
- HandleDereferenceGuard::ALLOW);
- SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
- !isolate()->optimizing_compiler_thread()->IsOptimizerThread());
- return reinterpret_cast<intptr_t>(*cell_);
+ return unique_id_.Hashcode();
+ }
+
+ virtual void FinalizeUniqueValueId() {
+ unique_id_ = UniqueValueId(cell_);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4770,7 +4825,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) {
HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
- return cell_.is_identical_to(b->cell());
+ return unique_id_ == b->unique_id_;
}
private:
@@ -4778,6 +4833,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
Handle<JSGlobalPropertyCell> cell_;
PropertyDetails details_;
+ UniqueValueId unique_id_;
};
@@ -4860,7 +4916,6 @@ class HAllocate: public HTemplateInstruction<2> {
HAllocate(HValue* context, HValue* size, HType type, Flags flags)
: type_(type),
flags_(flags) {
- ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
@@ -4955,7 +5010,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
new_space_dominator);
}
if (object != new_space_dominator) return true;
- if (object->IsFastLiteral()) return false;
if (object->IsAllocateObject()) return false;
if (object->IsAllocate()) {
return !HAllocate::cast(object)->GuaranteedInNewSpace();
@@ -5238,12 +5292,16 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
static const int kMaxLoadPolymorphism = 4;
+ virtual void FinalizeUniqueValueId();
+
protected:
virtual bool DataEquals(HValue* value);
private:
SmallMapList types_;
Handle<String> name_;
+ ZoneList<UniqueValueId> types_unique_ids_;
+ UniqueValueId name_unique_id_;
bool need_generic_;
};
@@ -5507,6 +5565,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
: name_(name),
is_in_object_(in_object),
offset_(offset),
+ transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
@@ -5537,6 +5596,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
+ UniqueValueId transition_unique_id() const { return transition_unique_id_; }
void set_transition(Handle<Map> map) { transition_ = map; }
HValue* new_space_dominator() const { return new_space_dominator_; }
@@ -5549,11 +5609,16 @@ class HStoreNamedField: public HTemplateInstruction<2> {
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
+ virtual void FinalizeUniqueValueId() {
+ transition_unique_id_ = UniqueValueId(transition_);
+ }
+
private:
Handle<String> name_;
bool is_in_object_;
int offset_;
Handle<Map> transition_;
+ UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
};
@@ -5677,6 +5742,10 @@ class HStoreKeyed
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+ bool IsConstantHoleStore() {
+ return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
+ }
+
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
@@ -5750,6 +5819,8 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
Handle<Map> transitioned_map)
: original_map_(original_map),
transitioned_map_(transitioned_map),
+ original_map_unique_id_(),
+ transitioned_map_unique_id_(),
from_kind_(original_map->elements_kind()),
to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
@@ -5780,18 +5851,25 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
+ virtual void FinalizeUniqueValueId() {
+ original_map_unique_id_ = UniqueValueId(original_map_);
+ transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
+ }
+
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
- return original_map_.is_identical_to(instr->original_map()) &&
- transitioned_map_.is_identical_to(instr->transitioned_map());
+ return original_map_unique_id_ == instr->original_map_unique_id_ &&
+ transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
}
private:
Handle<Map> original_map_;
Handle<Map> transitioned_map_;
+ UniqueValueId original_map_unique_id_;
+ UniqueValueId transitioned_map_unique_id_;
ElementsKind from_kind_;
ElementsKind to_kind_;
};
@@ -5966,45 +6044,6 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};
-class HFastLiteral: public HMaterializedLiteral<1> {
- public:
- HFastLiteral(HValue* context,
- Handle<JSObject> boilerplate,
- int total_size,
- int literal_index,
- int depth,
- AllocationSiteMode mode)
- : HMaterializedLiteral<1>(literal_index, depth, mode),
- boilerplate_(boilerplate),
- total_size_(total_size) {
- SetOperandAt(0, context);
- SetGVNFlag(kChangesNewSpacePromotion);
- }
-
- // Maximum depth and total number of elements and properties for literal
- // graphs to be considered for fast deep-copying.
- static const int kMaxLiteralDepth = 3;
- static const int kMaxLiteralProperties = 8;
-
- HValue* context() { return OperandAt(0); }
- Handle<JSObject> boilerplate() const { return boilerplate_; }
- int total_size() const { return total_size_; }
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
- virtual Handle<Map> GetMonomorphicJSObjectMap() {
- return Handle<Map>(boilerplate()->map());
- }
- virtual HType CalculateInferredType();
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
-
- private:
- Handle<JSObject> boilerplate_;
- int total_size_;
-};
-
-
class HArrayLiteral: public HMaterializedLiteral<1> {
public:
HArrayLiteral(HValue* context,
@@ -6192,7 +6231,7 @@ class HToFastProperties: public HUnaryOperation {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
// object literals.
- ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
+ ASSERT(value->IsObjectLiteral());
set_representation(Representation::Tagged());
}
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index abff7b778..127d7a9aa 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -517,7 +517,6 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
// Allow dereferencing for debug mode verification.
- Heap::RelocationLock(isolate()->heap());
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
for (int i = 0; i < blocks_.length(); i++) {
@@ -619,6 +618,7 @@ HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
isolate()->factory()->name##_value(), \
+ UniqueValueId(isolate()->heap()->name##_value()), \
Representation::Tagged(), \
htype, \
false, \
@@ -880,6 +880,7 @@ HGraph* HGraphBuilder::CreateGraph() {
HPhase phase("H_Block building", isolate());
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
+ graph()->FinalizeUniqueValueIds();
return graph_;
}
@@ -887,6 +888,9 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
+ if (no_side_effects_scope_count_ > 0) {
+ instr->SetFlag(HValue::kHasNoObservableSideEffects);
+ }
return instr;
}
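AddInstruction now consults no_side_effects_scope_count_: while a NoObservableSideEffectsScope (defined in hydrogen.h later in this patch) is alive, every emitted instruction is flagged kHasNoObservableSideEffects, so builder helpers no longer have to thread a BailoutId through and call AddSimulate after each store. A self-contained sketch of the RAII counter, under hypothetical names:

class BuilderSketch {
 public:
  BuilderSketch() : no_effects_depth_(0) {}

  // Stand-in for AddInstruction: reports whether an instruction added right
  // now would still need to be followed by a simulate.
  bool AddInstructionNeedsSimulate() const { return no_effects_depth_ == 0; }

  class NoObservableSideEffectsScope {
   public:
    explicit NoObservableSideEffectsScope(BuilderSketch* builder)
        : builder_(builder) {
      builder_->no_effects_depth_++;
    }
    ~NoObservableSideEffectsScope() { builder_->no_effects_depth_--; }
   private:
    BuilderSketch* builder_;
  };

 private:
  int no_effects_depth_;
};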
@@ -894,6 +898,7 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
+ ASSERT(no_side_effects_scope_count_ == 0);
current_block()->AddSimulate(id, removable);
environment()->set_previous_ast_id(id);
}
@@ -1041,7 +1046,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* length,
HValue* key,
bool is_js_array) {
- BailoutId ast_id = environment()->previous_ast_id();
Zone* zone = this->zone();
IfBuilder length_checker(this);
@@ -1074,7 +1078,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HAdd::New(zone, context, length, graph_->GetConstant1()));
new_length->ChangeRepresentation(Representation::Integer32());
new_length->ClearFlag(HValue::kCanOverflow);
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
Factory* factory = isolate()->factory();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
@@ -1083,7 +1086,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length, true,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
length_checker.BeginElse();
@@ -1210,6 +1212,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
if (IsGrowStoreMode(store_mode)) {
+ NoObservableSideEffectsScope no_effects(this);
+
elements = BuildCheckForCapacityGrow(object, elements, elements_kind,
length, key, is_js_array);
checked_key = key;
@@ -1219,6 +1223,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
+ NoObservableSideEffectsScope no_effects(this);
+
elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
length);
} else {
@@ -1238,7 +1244,6 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
ElementsKind kind,
HValue* capacity) {
- BailoutId ast_id = current_block()->last_environment()->previous_ast_id();
Zone* zone = this->zone();
int elements_size = IsFastDoubleElementsKind(kind)
@@ -1260,10 +1265,14 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
- // TODO(hpayer): add support for old data space
- if (FLAG_pretenure_literals && !IsFastDoubleElementsKind(kind)) {
- flags = static_cast<HAllocate::Flags>(
- flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ if (FLAG_pretenure_literals) {
+ if (IsFastDoubleElementsKind(kind)) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);
+ } else {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
}
if (IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
@@ -1273,27 +1282,40 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
HValue* elements =
AddInstruction(new(zone) HAllocate(context, total_size,
HType::JSArray(), flags));
+ return elements;
+}
+
+void HGraphBuilder::BuildInitializeElements(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity) {
+ Zone* zone = this->zone();
Factory* factory = isolate()->factory();
Handle<Map> map = IsFastDoubleElementsKind(kind)
? factory->fixed_double_array_map()
: factory->fixed_array_map();
- BuildStoreMap(elements, map, ast_id);
+ BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
capacity, true, FixedArray::kLengthOffset);
AddInstruction(store_length);
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
+}
- return elements;
+
+HValue* HGraphBuilder::BuildAllocateAndInitializeElements(HValue* context,
+ ElementsKind kind,
+ HValue* capacity) {
+ HValue* new_elements =
+ BuildAllocateElements(context, kind, capacity);
+ BuildInitializeElements(new_elements, kind, capacity);
+ return new_elements;
}
HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- HValue* map,
- BailoutId id) {
+ HValue* map) {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
Handle<String> map_field_name = factory->map_field_string();
@@ -1302,18 +1324,16 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
true, JSObject::kMapOffset);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
- AddSimulate(id, REMOVABLE_SIMULATE);
return store_map;
}
HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
- Handle<Map> map,
- BailoutId id) {
+ Handle<Map> map) {
Zone* zone = this->zone();
HValue* map_constant =
AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
- return BuildStoreMap(object, map_constant, id);
+ return BuildStoreMap(object, map_constant);
}
@@ -1372,7 +1392,7 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
BuildNewSpaceArrayCheck(new_capacity, kind);
HValue* new_elements =
- BuildAllocateElements(context, kind, new_capacity);
+ BuildAllocateAndInitializeElements(context, kind, new_capacity);
BuildCopyElements(context, elements, kind,
new_elements, kind,
@@ -1395,7 +1415,6 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
ElementsKind elements_kind,
HValue* from,
HValue* to) {
- BailoutId ast_id = current_block()->last_environment()->previous_ast_id();
// Fast elements kinds need to be initialized in case statements below cause
// a garbage collection.
Factory* factory = isolate()->factory();
@@ -1413,7 +1432,6 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
HValue* key = builder.BeginBody(from, to, Token::LT);
AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder.EndBody();
}
@@ -1426,7 +1444,6 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
ElementsKind to_elements_kind,
HValue* length,
HValue* capacity) {
- BailoutId ast_id = environment()->previous_ast_id();
bool pre_fill_with_holes =
IsFastDoubleElementsKind(from_elements_kind) &&
IsFastObjectElementsKind(to_elements_kind);
@@ -1450,7 +1467,6 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
to_elements_kind));
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder.EndBody();
@@ -1462,6 +1478,119 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
}
+HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
+ HValue* boilerplate,
+ AllocationSiteMode mode,
+ ElementsKind kind,
+ int length) {
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+
+ NoObservableSideEffectsScope no_effects(this);
+
+ // All sizes here are multiples of kPointerSize.
+ int size = JSArray::kSize;
+ if (mode == TRACK_ALLOCATION_SITE) {
+ size += AllocationSiteInfo::kSize;
+ }
+ int elems_offset = size;
+ if (length > 0) {
+ size += IsFastDoubleElementsKind(kind)
+ ? FixedDoubleArray::SizeFor(length)
+ : FixedArray::SizeFor(length);
+ }
+
+ HAllocate::Flags allocate_flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+ if (IsFastDoubleElementsKind(kind)) {
+ allocate_flags = static_cast<HAllocate::Flags>(
+ allocate_flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
+ }
+
+ // Allocate both the JS array and the elements array in one big
+ // allocation. This avoids multiple limit checks.
+ HValue* size_in_bytes =
+ AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
+ HInstruction* object =
+ AddInstruction(new(zone) HAllocate(context,
+ size_in_bytes,
+ HType::JSObject(),
+ allocate_flags));
+
+ // Copy the JS array part.
+ for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
+ if ((i != JSArray::kElementsOffset) || (length == 0)) {
+ HInstruction* value =
+ AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
+ if (i != JSArray::kMapOffset) {
+ AddInstruction(new(zone) HStoreNamedField(object,
+ factory->empty_string(),
+ value,
+ true, i));
+ } else {
+ BuildStoreMap(object, value);
+ }
+ }
+ }
+
+ // Create an allocation site info if requested.
+ if (mode == TRACK_ALLOCATION_SITE) {
+ HValue* alloc_site =
+ AddInstruction(new(zone) HInnerAllocatedObject(object, JSArray::kSize));
+ Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
+ BuildStoreMap(alloc_site, alloc_site_map);
+ int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
+ AddInstruction(new(zone) HStoreNamedField(alloc_site,
+ factory->empty_string(),
+ boilerplate,
+ true, alloc_payload_offset));
+ }
+
+ if (length > 0) {
+ // Get hold of the elements array of the boilerplate and setup the
+ // elements pointer in the resulting object.
+ HValue* boilerplate_elements =
+ AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
+ HValue* object_elements =
+ AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
+ AddInstruction(new(zone) HStoreNamedField(object,
+ factory->elements_field_string(),
+ object_elements,
+ true, JSObject::kElementsOffset));
+
+ // Copy the elements array header.
+ for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
+ HInstruction* value =
+ AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
+ true, i));
+ AddInstruction(new(zone) HStoreNamedField(object_elements,
+ factory->empty_string(),
+ value,
+ true, i));
+ }
+
+ // Copy the elements array contents.
+ // TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold
+ // copying loops with constant length up to a given boundary and use this
+ // helper here instead.
+ for (int i = 0; i < length; i++) {
+ HValue* key_constant =
+ AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
+ HInstruction* value =
+ AddInstruction(new(zone) HLoadKeyed(boilerplate_elements,
+ key_constant,
+ NULL,
+ kind));
+ AddInstruction(new(zone) HStoreKeyed(object_elements,
+ key_constant,
+ value,
+ kind));
+ }
+ }
+
+ return object;
+}
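BuildCloneShallowArray above folds the JSArray header, the optional AllocationSiteInfo, and the elements backing store into a single HAllocate so only one allocation limit check is emitted. A sketch of that size/offset computation, with assumed ia32-style constants standing in for JSArray::kSize, AllocationSiteInfo::kSize and the FixedArray header:

static int CloneAllocationSizeSketch(bool track_allocation_site,
                                     bool double_elements,
                                     int length,
                                     int* elements_offset) {
  const int kArrayHeaderSize = 16;        // assumed
  const int kAllocationSiteInfoSize = 8;  // assumed
  const int kElementsHeaderSize = 8;      // assumed (map + length)
  int size = kArrayHeaderSize;
  if (track_allocation_site) size += kAllocationSiteInfoSize;
  *elements_offset = size;  // elements land right behind the header(s)
  if (length > 0) {
    size += kElementsHeaderSize + (double_elements ? 8 : 4) * length;
  }
  return size;
}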
+
+
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: HGraphBuilder(info),
@@ -1568,6 +1697,19 @@ HBasicBlock* HGraph::CreateBasicBlock() {
}
+void HGraph::FinalizeUniqueValueIds() {
+ AssertNoAllocation no_gc;
+ ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
+ for (int i = 0; i < blocks()->length(); ++i) {
+ for (HInstruction* instr = blocks()->at(i)->first();
+ instr != NULL;
+ instr = instr->next()) {
+ instr->FinalizeUniqueValueId();
+ }
+ }
+}
+
+
void HGraph::Canonicalize() {
if (!FLAG_use_canonicalizing) return;
HPhase phase("H_Canonicalize", this);
@@ -2358,8 +2500,10 @@ HValueMap::HValueMap(Zone* zone, const HValueMap* other)
array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
- memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
- memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ array_, other->array_, array_size_ * sizeof(HValueMapListElement));
+ OS::MemCopy(
+ lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
@@ -2485,7 +2629,7 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
lists_ = new_lists;
if (old_lists != NULL) {
- memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
+ OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -2531,7 +2675,7 @@ HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
if (this != &other) {
- memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
+ OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
@@ -2829,7 +2973,7 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
size_t string_len = strlen(underlying_buffer) + 1;
ASSERT(string_len <= sizeof(underlying_buffer));
char* result = new char[strlen(underlying_buffer) + 1];
- memcpy(result, underlying_buffer, string_len);
+ OS::MemCopy(result, underlying_buffer, string_len);
return SmartArrayPointer<char>(result);
}
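Throughout this patch, direct memcpy/memmove calls are replaced by OS::MemCopy/OS::MemMove so every copy goes through one platform entry point that can be backed by the SSE2 routine generated at startup (see CreateMemMoveFunction further down). A hedged sketch of the dispatch idea, not the actual platform code:

#include <stddef.h>
#include <string.h>

typedef void (*MemMoveFn)(void* dest, const void* src, size_t size);

static void MemMoveFallback(void* dest, const void* src, size_t size) {
  memmove(dest, src, size);  // handles overlap; used when no fast path exists
}

static MemMoveFn mem_move_impl = &MemMoveFallback;

void MemMoveSketch(void* dest, const void* src, size_t size) {
  mem_move_impl(dest, src, size);  // may instead point at generated SSE2 code
}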
@@ -3295,10 +3439,11 @@ void HInferRepresentation::Analyze() {
void HGraph::MergeRemovableSimulates() {
+ ZoneList<HSimulate*> mergelist(2, zone());
for (int i = 0; i < blocks()->length(); ++i) {
HBasicBlock* block = blocks()->at(i);
- // Always reset the folding candidate at the start of a block.
- HSimulate* folding_candidate = NULL;
+ // Make sure the merge list is empty at the start of a block.
+ ASSERT(mergelist.is_empty());
// Nasty heuristic: Never remove the first simulate in a block. This
// just so happens to have a beneficial effect on register allocation.
bool first = true;
@@ -3309,33 +3454,38 @@ void HGraph::MergeRemovableSimulates() {
// in the outer environment.
// (Before each HEnterInlined, there is a non-foldable HSimulate
// anyway, so we get the barrier in the other direction for free.)
- if (folding_candidate != NULL) {
- folding_candidate->DeleteAndReplaceWith(NULL);
+ // Simply remove all accumulated simulates without merging. This
+ // is safe because simulates after instructions with side effects
+ // are never added to the merge list.
+ while (!mergelist.is_empty()) {
+ mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
}
- folding_candidate = NULL;
continue;
}
- // If we have an HSimulate and a candidate, perform the folding.
+ // Skip the non-simulates and the first simulate.
if (!current->IsSimulate()) continue;
if (first) {
first = false;
continue;
}
HSimulate* current_simulate = HSimulate::cast(current);
- if (folding_candidate != NULL) {
- folding_candidate->MergeInto(current_simulate);
- folding_candidate->DeleteAndReplaceWith(NULL);
- folding_candidate = NULL;
- }
- // Check if the current simulate is a candidate for folding.
- if (current_simulate->previous()->HasObservableSideEffects() &&
- !current_simulate->next()->IsSimulate()) {
- continue;
- }
- if (!current_simulate->is_candidate_for_removal()) {
+ if ((current_simulate->previous()->HasObservableSideEffects() &&
+ !current_simulate->next()->IsSimulate()) ||
+ !current_simulate->is_candidate_for_removal()) {
+ // This simulate is not suitable for folding.
+ // Fold the ones accumulated so far.
+ current_simulate->MergeWith(&mergelist);
continue;
+ } else {
+ // Accumulate this simulate for folding later on.
+ mergelist.Add(current_simulate, zone());
}
- folding_candidate = current_simulate;
+ }
+
+ if (!mergelist.is_empty()) {
+ // Merge the accumulated simulates at the end of the block.
+ HSimulate* last = mergelist.RemoveLast();
+ last->MergeWith(&mergelist);
}
}
}
@@ -4193,8 +4343,6 @@ bool HOptimizedGraphBuilder::BuildGraph() {
void HGraph::GlobalValueNumbering() {
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
- // We use objects' raw addresses for identification, so they must not move.
- Heap::RelocationLock relocation_lock(isolate()->heap());
HPhase phase("H_Global value numbering", this);
HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
@@ -5977,7 +6125,8 @@ static bool LookupSetter(Handle<Map> map,
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
int* max_properties,
- int* total_size) {
+ int* data_size,
+ int* pointer_size) {
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
@@ -5986,7 +6135,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
- *total_size += FixedDoubleArray::SizeFor(elements->length());
+ *data_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
@@ -5998,12 +6147,13 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
- total_size)) {
+ data_size,
+ pointer_size)) {
return false;
}
}
}
- *total_size += FixedArray::SizeFor(length);
+ *pointer_size += FixedArray::SizeFor(length);
} else {
return false;
}
@@ -6022,14 +6172,15 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
- total_size)) {
+ data_size,
+ pointer_size)) {
return false;
}
}
}
}
- *total_size += boilerplate->map()->instance_size();
+ *pointer_size += boilerplate->map()->instance_size();
return true;
}
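IsFastLiteral now reports two totals instead of one: space for double backing stores (data_size, eligible for old data space) and space for everything holding tagged pointers (pointer_size, old pointer space), which is what the pretenuring check in BuildFastLiteral keys on. A tiny sketch of the split, using a hypothetical descriptor type:

struct BoilerplateSizesSketch {
  int instance_and_fixed_array_size;  // tagged words
  int double_elements_size;           // raw doubles
};

static void AccumulateSizesSketch(const BoilerplateSizesSketch& b,
                                  int* data_size, int* pointer_size) {
  *data_size += b.double_elements_size;            // may go to old data space
  *pointer_size += b.instance_and_fixed_array_size;
}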
@@ -6043,34 +6194,41 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()),
- isolate());
- if (boilerplate->IsJSObject() &&
- IsFastLiteral(Handle<JSObject>::cast(boilerplate),
- HFastLiteral::kMaxLiteralDepth,
+ int data_size = 0;
+ int pointer_size = 0;
+ int max_properties = kMaxFastLiteralProperties;
+ Handle<Object> original_boilerplate(closure->literals()->get(
+ expr->literal_index()), isolate());
+ if (original_boilerplate->IsJSObject() &&
+ IsFastLiteral(Handle<JSObject>::cast(original_boilerplate),
+ kMaxFastLiteralDepth,
&max_properties,
- &total_size)) {
- Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
- literal = new(zone()) HFastLiteral(context,
- boilerplate_object,
- total_size,
- expr->literal_index(),
- expr->depth(),
- DONT_TRACK_ALLOCATION_SITE);
+ &data_size,
+ &pointer_size)) {
+ Handle<JSObject> original_boilerplate_object =
+ Handle<JSObject>::cast(original_boilerplate);
+ Handle<JSObject> boilerplate_object =
+ DeepCopy(original_boilerplate_object);
+
+ literal = BuildFastLiteral(context,
+ boilerplate_object,
+ original_boilerplate_object,
+ data_size,
+ pointer_size,
+ DONT_TRACK_ALLOCATION_SITE);
} else {
- literal = new(zone()) HObjectLiteral(context,
- expr->constant_properties(),
- expr->fast_elements(),
- expr->literal_index(),
- expr->depth(),
- expr->has_function());
+ literal = AddInstruction(
+ new(zone()) HObjectLiteral(context,
+ expr->constant_properties(),
+ expr->fast_elements(),
+ expr->literal_index(),
+ expr->depth(),
+ expr->has_function()));
}
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
- PushAndAdd(literal);
+ Push(literal);
expr->CalculateEmitStore(zone());
@@ -6167,9 +6325,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
}
- Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
+ Handle<JSObject> original_boilerplate_object =
+ Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
- Handle<JSObject>::cast(boilerplate)->GetElementsKind();
+ Handle<JSObject>::cast(original_boilerplate_object)->GetElementsKind();
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
@@ -6178,33 +6337,38 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
- int total_size = 0;
- int max_properties = HFastLiteral::kMaxLiteralProperties;
- if (IsFastLiteral(boilerplate,
- HFastLiteral::kMaxLiteralDepth,
+ int data_size = 0;
+ int pointer_size = 0;
+ int max_properties = kMaxFastLiteralProperties;
+ if (IsFastLiteral(original_boilerplate_object,
+ kMaxFastLiteralDepth,
&max_properties,
- &total_size)) {
+ &data_size,
+ &pointer_size)) {
if (mode == TRACK_ALLOCATION_SITE) {
- total_size += AllocationSiteInfo::kSize;
- }
- literal = new(zone()) HFastLiteral(context,
- boilerplate,
- total_size,
- expr->literal_index(),
- expr->depth(),
- mode);
+ pointer_size += AllocationSiteInfo::kSize;
+ }
+
+ Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
+ literal = BuildFastLiteral(context,
+ boilerplate_object,
+ original_boilerplate_object,
+ data_size,
+ pointer_size,
+ mode);
} else {
- literal = new(zone()) HArrayLiteral(context,
- boilerplate,
- length,
- expr->literal_index(),
- expr->depth(),
- mode);
+ literal = AddInstruction(
+ new(zone()) HArrayLiteral(context,
+ original_boilerplate_object,
+ length,
+ expr->literal_index(),
+ expr->depth(),
+ mode));
}
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
- PushAndAdd(literal);
+ Push(literal);
HLoadElements* elements = NULL;
@@ -6420,8 +6584,9 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
}
AddInstruction(new(zone()) HCheckNonSmi(object));
+
HInstruction* typecheck =
- AddInstruction(HCheckInstanceType::NewIsJSArray(object, zone()));
+ AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
HInstruction* instr =
HLoadNamedField::NewArrayLength(zone(), object, typecheck);
instr->set_position(expr->position());
@@ -7113,8 +7278,10 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
- HInstruction* holder_value = AddInstruction(
+ AddInstruction(
new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
+ HValue* holder_value = AddInstruction(
+ new(zone()) HConstant(holder, Representation::Tagged()));
return BuildLoadNamedField(holder_value, holder_map, &lookup);
}
@@ -7990,8 +8157,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Parse and allocate variables.
CompilationInfo target_info(target, zone());
Handle<SharedFunctionInfo> target_shared(target->shared());
- if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
- !Scope::Analyze(&target_info)) {
+ if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
@@ -9942,6 +10108,241 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
}
+HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
+ HValue* context,
+ Handle<JSObject> boilerplate_object,
+ Handle<JSObject> original_boilerplate_object,
+ int data_size,
+ int pointer_size,
+ AllocationSiteMode mode) {
+ Zone* zone = this->zone();
+ int total_size = data_size + pointer_size;
+
+ NoObservableSideEffectsScope no_effects(this);
+
+ HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
+ // TODO(hpayer): add support for old data space
+ if (FLAG_pretenure_literals &&
+ isolate()->heap()->ShouldGloballyPretenure() &&
+ data_size == 0) {
+ flags = static_cast<HAllocate::Flags>(
+ flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
+ }
+
+ HValue* size_in_bytes =
+ AddInstruction(new(zone) HConstant(total_size,
+ Representation::Integer32()));
+ HInstruction* result =
+ AddInstruction(new(zone) HAllocate(context,
+ size_in_bytes,
+ HType::JSObject(),
+ flags));
+ int offset = 0;
+ BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, result,
+ &offset, mode);
+ return result;
+}
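BuildFastLiteral performs a single allocation of data_size + pointer_size bytes; BuildEmitDeepCopy then lays nested boilerplate objects out back to back, bumping a running offset that doubles as the HInnerAllocatedObject offset for each copy. A sketch of that bookkeeping with plain integers:

// Reserves room for one (sub)object inside the single allocation and returns
// where it starts; callers recurse for nested objects with the same offset
// pointer, so children end up directly behind their parent.
static int PlaceObjectSketch(int object_size, int elements_size, int* offset) {
  int object_offset = *offset;
  *offset += object_size + elements_size;
  return object_offset;
}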
+
+
+void HOptimizedGraphBuilder::BuildEmitDeepCopy(
+ Handle<JSObject> boilerplate_object,
+ Handle<JSObject> original_boilerplate_object,
+ HInstruction* target,
+ int* offset,
+ AllocationSiteMode mode) {
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+
+ HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
+ original_boilerplate_object, Representation::Tagged()));
+
+ bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
+ boilerplate_object->map()->CanTrackAllocationSite();
+
+ // Only elements backing stores for non-COW arrays need to be copied.
+ Handle<FixedArrayBase> elements(boilerplate_object->elements());
+ Handle<FixedArrayBase> original_elements(
+ original_boilerplate_object->elements());
+ ElementsKind kind = boilerplate_object->map()->elements_kind();
+
+ // Increase the offset so that subsequent objects end up right after
+ // this object and its backing store.
+ int object_offset = *offset;
+ int object_size = boilerplate_object->map()->instance_size();
+ int elements_size = (elements->length() > 0 &&
+ elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
+ elements->Size() : 0;
+ int elements_offset = *offset + object_size;
+ int inobject_properties = boilerplate_object->map()->inobject_properties();
+ if (create_allocation_site_info) {
+ elements_offset += AllocationSiteInfo::kSize;
+ *offset += AllocationSiteInfo::kSize;
+ }
+
+ *offset += object_size + elements_size;
+
+ HValue* object_elements = BuildCopyObjectHeader(boilerplate_object, target,
+ object_offset, elements_offset, elements_size);
+
+ // Copy in-object properties.
+ HValue* object_properties =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
+ for (int i = 0; i < inobject_properties; i++) {
+ Handle<Object> value =
+ Handle<Object>(boilerplate_object->InObjectPropertyAt(i),
+ isolate());
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<JSObject> original_value_object = Handle<JSObject>::cast(
+ Handle<Object>(original_boilerplate_object->InObjectPropertyAt(i),
+ isolate()));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ AddInstruction(new(zone) HStoreNamedField(
+ object_properties, factory->unknown_field_string(), value_instruction,
+ true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ BuildEmitDeepCopy(value_object, original_value_object, target,
+ offset, DONT_TRACK_ALLOCATION_SITE);
+ } else {
+ HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
+ value, Representation::Tagged()));
+ AddInstruction(new(zone) HStoreNamedField(
+ object_properties, factory->unknown_field_string(), value_instruction,
+ true, boilerplate_object->GetInObjectPropertyOffset(i)));
+ }
+ }
+
+ // Build Allocation Site Info if desired
+ if (create_allocation_site_info) {
+ HValue* alloc_site =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, JSArray::kSize));
+ Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
+ BuildStoreMap(alloc_site, alloc_site_map);
+ int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
+ AddInstruction(new(zone) HStoreNamedField(alloc_site,
+ factory->payload_string(),
+ original_boilerplate,
+ true, alloc_payload_offset));
+ }
+
+ if (object_elements != NULL) {
+ HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
+ elements, Representation::Tagged()));
+
+ int elements_length = elements->length();
+ HValue* object_elements_length =
+ AddInstruction(new(zone) HConstant(
+ elements_length, Representation::Integer32()));
+
+ BuildInitializeElements(object_elements, kind, object_elements_length);
+
+ // Copy elements backing store content.
+ if (elements->IsFixedDoubleArray()) {
+ for (int i = 0; i < elements_length; i++) {
+ HValue* key_constant =
+ AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HLoadKeyed(
+ boilerplate_elements, key_constant, NULL, kind));
+ AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ }
+ } else if (elements->IsFixedArray()) {
+ Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
+ Handle<FixedArray> original_fast_elements =
+ Handle<FixedArray>::cast(original_elements);
+ for (int i = 0; i < elements_length; i++) {
+ Handle<Object> value(fast_elements->get(i), isolate());
+ HValue* key_constant =
+ AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
+ if (value->IsJSObject()) {
+ Handle<JSObject> value_object = Handle<JSObject>::cast(value);
+ Handle<JSObject> original_value_object = Handle<JSObject>::cast(
+ Handle<Object>(original_fast_elements->get(i), isolate()));
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
+ AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ BuildEmitDeepCopy(value_object, original_value_object, target,
+ offset, DONT_TRACK_ALLOCATION_SITE);
+ } else {
+ HInstruction* value_instruction =
+ AddInstruction(new(zone) HLoadKeyed(
+ boilerplate_elements, key_constant, NULL, kind));
+ AddInstruction(new(zone) HStoreKeyed(
+ object_elements, key_constant, value_instruction, kind));
+ }
+ }
+ } else {
+ UNREACHABLE();
+ }
+ }
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* target,
+ int object_offset,
+ int elements_offset,
+ int elements_size) {
+ ASSERT(boilerplate_object->properties()->length() == 0);
+ Zone* zone = this->zone();
+ Factory* factory = isolate()->factory();
+ HValue* result = NULL;
+
+ HValue* object_header =
+ AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
+ Handle<Map> boilerplate_object_map(boilerplate_object->map());
+ BuildStoreMap(object_header, boilerplate_object_map);
+
+ HInstruction* elements;
+ if (elements_size == 0) {
+ Handle<Object> elements_field =
+ Handle<Object>(boilerplate_object->elements(), isolate());
+ elements = AddInstruction(new(zone) HConstant(
+ elements_field, Representation::Tagged()));
+ } else {
+ elements = AddInstruction(new(zone) HInnerAllocatedObject(
+ target, elements_offset));
+ result = elements;
+ }
+ HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
+ object_header,
+ factory->elements_field_string(),
+ elements,
+ true, JSObject::kElementsOffset));
+ elements_store->SetGVNFlag(kChangesElementsPointer);
+
+ Handle<Object> properties_field =
+ Handle<Object>(boilerplate_object->properties(), isolate());
+ ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
+ HInstruction* properties = AddInstruction(new(zone) HConstant(
+ properties_field, Representation::None()));
+ AddInstruction(new(zone) HStoreNamedField(object_header,
+ factory->empty_string(),
+ properties,
+ true, JSObject::kPropertiesOffset));
+
+ if (boilerplate_object->IsJSArray()) {
+ Handle<JSArray> boilerplate_array =
+ Handle<JSArray>::cast(boilerplate_object);
+ Handle<Object> length_field =
+ Handle<Object>(boilerplate_array->length(), isolate());
+ HInstruction* length = AddInstruction(new(zone) HConstant(
+ length_field, Representation::None()));
+ HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
+ object_header,
+ factory->length_field_string(),
+ length,
+ true, JSArray::kLengthOffset));
+ length_store->SetGVNFlag(kChangesArrayLengths);
+ }
+
+ return result;
+}
+
+
void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -10583,7 +10984,13 @@ void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
- return Bailout("inlined runtime function: MathSqrt");
+ ASSERT(call->arguments()->length() == 1);
+ CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+ HValue* value = Pop();
+ HValue* context = environment()->LookupContext();
+ HInstruction* result =
+ HUnaryMathOperation::New(zone(), context, value, kMathSqrt);
+ return ast_context()->ReturnInstruction(result, call->id());
}
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index ef3679eca..3dbca3c3e 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -260,6 +260,7 @@ class HGraph: public ZoneObject {
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
+ void FinalizeUniqueValueIds();
void InitializeInferredTypes();
void InsertTypeConversions();
void MergeRemovableSimulates();
@@ -865,7 +866,10 @@ class FunctionState {
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info)
- : info_(info), graph_(NULL), current_block_(NULL) {}
+ : info_(info),
+ graph_(NULL),
+ current_block_(NULL),
+ no_side_effects_scope_count_(0) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -891,6 +895,14 @@ class HGraphBuilder {
HReturn* AddReturn(HValue* value);
+ void IncrementInNoSideEffectsScope() {
+ no_side_effects_scope_count_++;
+ }
+
+ void DecrementInNoSideEffectsScope() {
+ no_side_effects_scope_count_--;
+ }
+
protected:
virtual bool BuildGraph() = 0;
@@ -939,8 +951,8 @@ class HGraphBuilder {
KeyedAccessStoreMode store_mode,
Representation checked_index_representation = Representation::None());
- HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
- HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);
+ HInstruction* BuildStoreMap(HValue* object, HValue* map);
+ HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
class CheckBuilder {
public:
@@ -1032,6 +1044,20 @@ class HGraphBuilder {
bool finished_;
};
+ class NoObservableSideEffectsScope {
+ public:
+ explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
+ builder_(builder) {
+ builder_->IncrementInNoSideEffectsScope();
+ }
+ ~NoObservableSideEffectsScope() {
+ builder_->DecrementInNoSideEffectsScope();
+ }
+
+ private:
+ HGraphBuilder* builder_;
+ };
+
HValue* BuildNewElementsCapacity(HValue* context,
HValue* old_capacity);
@@ -1042,6 +1068,14 @@ class HGraphBuilder {
ElementsKind kind,
HValue* capacity);
+ void BuildInitializeElements(HValue* elements,
+ ElementsKind kind,
+ HValue* capacity);
+
+ HValue* BuildAllocateAndInitializeElements(HValue* context,
+ ElementsKind kind,
+ HValue* capacity);
+
HValue* BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
@@ -1062,11 +1096,18 @@ class HGraphBuilder {
HValue* length,
HValue* capacity);
+ HValue* BuildCloneShallowArray(HContext* context,
+ HValue* boilerplate,
+ AllocationSiteMode mode,
+ ElementsKind kind,
+ int length);
+
private:
HGraphBuilder();
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
+ int no_side_effects_scope_count_;
};
@@ -1183,6 +1224,11 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
static const int kUnlimitedMaxInlinedNodes = 10000;
static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
+ // Maximum depth and total number of elements and properties for literal
+ // graphs to be considered for fast deep-copying.
+ static const int kMaxFastLiteralDepth = 3;
+ static const int kMaxFastLiteralProperties = 8;
+
// Simple accessors.
void set_function_state(FunctionState* state) { function_state_ = state; }
@@ -1459,6 +1505,26 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildThisFunction();
+ HInstruction* BuildFastLiteral(HValue* context,
+ Handle<JSObject> boilerplate_object,
+ Handle<JSObject> original_boilerplate_object,
+ int data_size,
+ int pointer_size,
+ AllocationSiteMode mode);
+
+ void BuildEmitDeepCopy(Handle<JSObject> boilerplate_object,
+ Handle<JSObject> object,
+ HInstruction* result,
+ int* offset,
+ AllocationSiteMode mode);
+
+ MUST_USE_RESULT HValue* BuildCopyObjectHeader(
+ Handle<JSObject> boilerplate_object,
+ HInstruction* target,
+ int object_offset,
+ int elements_offset,
+ int elements_size);
+
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index b48906e70..32fe6a9c1 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -2556,9 +2556,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate()->assembler_spare_buffer() == NULL &&
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index a3da9af43..d1c1fbf00 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -1026,6 +1026,13 @@ class Assembler : public AssemblerBase {
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void movdqu(const Operand& dst, XMMRegister src);
+ void movdq(bool aligned, XMMRegister dst, const Operand& src) {
+ if (aligned) {
+ movdqa(dst, src);
+ } else {
+ movdqu(dst, src);
+ }
+ }
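movdq is a small convenience wrapper so the memmove code generator below can emit the same loop body for aligned and unaligned sources. The same dispatch written with compiler intrinsics, as a hypothetical helper outside the assembler:

#include <emmintrin.h>

static inline __m128i LoadDqSketch(bool aligned, const void* src) {
  const __m128i* p = reinterpret_cast<const __m128i*>(src);
  return aligned ? _mm_load_si128(p)     // movdqa: requires 16-byte alignment
                 : _mm_loadu_si128(p);   // movdqu: no alignment requirement
}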
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index e861db3ac..08bc22784 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -1810,25 +1810,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(eax, depth) instruction right after the call.
- Label stack_check;
- __ mov(ebx, Operand(esp, 0)); // return address
- if (FLAG_debug_code) {
- __ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
- __ Assert(equal, "test eax instruction not found after loop stack check");
- }
- __ movzx_b(ebx, Operand(ebx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
- __ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
@@ -1845,23 +1827,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // Insert a stack guard check so that if we decide not to perform
- // on-stack replacement right away, the function calling this stub can
- // still be interrupted.
- __ bind(&stack_check);
- Label ok;
- ExternalReference stack_limit =
- ExternalReference::address_of_stack_limit(masm->isolate());
- __ cmp(esp, Operand::StaticVariable(stack_limit));
- __ j(above_equal, &ok, Label::kNear);
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(eax);
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index 9b77c50c0..05dceb7e2 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@@ -43,6 +44,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { eax, ebx, ecx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
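With this descriptor in place, the hand-written FastCloneShallowArrayStub assembly further down is deleted: the stub's three inputs (literals array, literal index, constant elements) now arrive in the registers listed here, its body is generated from the Hydrogen BuildCloneShallowArray added earlier in this patch, and the runtime function serves as the miss/deoptimization fallback. A sketch of what such a descriptor amounts to, with hypothetical types:

struct StubInterfaceDescriptorSketch {
  int register_param_count;
  const char* register_params[3];
  const char* deoptimization_handler;  // fallback entry if the fast path bails
};

static const StubInterfaceDescriptorSketch kFastCloneShallowArraySketch = {
  3,
  { "eax", "ebx", "ecx" },
  "Runtime::kCreateArrayLiteralShallow"
};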
+
+
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -163,9 +176,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+ int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -393,168 +404,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // ecx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ Allocate(size, eax, ebx, edx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ mov(FieldOperand(eax, allocation_info_start),
- Immediate(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(eax, i), ebx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(edx, Operand(eax, JSArray::kSize));
- }
- __ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ mov(ebx, FieldOperand(ecx, i));
- __ mov(FieldOperand(edx, i), ebx);
- }
- while (i < elements_size) {
- __ fld_d(FieldOperand(ecx, i));
- __ fstp_d(FieldOperand(edx, i));
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
-
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [esp + kPointerSize]: constant elements.
- // [esp + (2 * kPointerSize)]: literal index.
- // [esp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into ecx and check if we need to create a
- // boilerplate.
- __ mov(ecx, Operand(esp, 3 * kPointerSize));
- __ mov(eax, Operand(esp, 2 * kPointerSize));
- STATIC_ASSERT(kPointerSize == 4);
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
- FixedArray::kHeaderSize));
- Factory* factory = masm->isolate()->factory();
- __ cmp(ecx, factory->undefined_value());
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // ecx is boilerplate object.
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ CheckMap(ebx, factory->fixed_cow_array_map(),
- &check_fast_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ CheckMap(ebx, factory->fixed_array_map(),
- &double_elements, DONT_DO_SMI_CHECK);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Handle<Map> expected_map;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map = factory->fixed_array_map();
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map = factory->fixed_double_array_map();
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map = factory->fixed_cow_array_map();
- }
- __ push(ecx);
- __ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
- __ Assert(equal, message);
- __ pop(ecx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -5076,6 +4925,7 @@ bool CEntryStub::IsPregenerated() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
@@ -5215,8 +5065,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
+ // Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
@@ -7786,9 +7641,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 550c83d51..caf808b95 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -173,21 +173,94 @@ UnaryMathFunction CreateSqrtFunction() {
}
-static void MemCopyWrapper(void* dest, const void* src, size_t size) {
- memcpy(dest, src, size);
+// Helper functions for CreateMemMoveFunction.
+#undef __
+#define __ ACCESS_MASM(masm)
+
+// Keep around global pointers to these objects so that Valgrind won't complain.
+static size_t* medium_handlers = NULL;
+static size_t* small_handlers = NULL;
+
+
+enum Direction { FORWARD, BACKWARD };
+enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
+
+// Expects registers:
+// esi - source, aligned if alignment == ALIGNED
+// edi - destination, always aligned
+// ecx - count (copy size in bytes)
+// edx - loop count (number of 64 byte chunks)
+void MemMoveEmitMainLoop(MacroAssembler* masm,
+ Label* move_last_15,
+ Direction direction,
+ Alignment alignment) {
+ Register src = esi;
+ Register dst = edi;
+ Register count = ecx;
+ Register loop_count = edx;
+ Label loop, move_last_31, move_last_63;
+ __ cmp(loop_count, 0);
+ __ j(equal, &move_last_63);
+ __ bind(&loop);
+ // Main loop. Copy in 64 byte chunks.
+ if (direction == BACKWARD) __ sub(src, Immediate(0x40));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
+ if (direction == FORWARD) __ add(src, Immediate(0x40));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ __ movdqa(Operand(dst, 0x20), xmm2);
+ __ movdqa(Operand(dst, 0x30), xmm3);
+ if (direction == FORWARD) __ add(dst, Immediate(0x40));
+ __ dec(loop_count);
+ __ j(not_zero, &loop);
+ // At most 63 bytes left to copy.
+ __ bind(&move_last_63);
+ __ test(count, Immediate(0x20));
+ __ j(zero, &move_last_31);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x20));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
+ __ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
+ if (direction == FORWARD) __ add(src, Immediate(0x20));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
+ __ movdqa(Operand(dst, 0x00), xmm0);
+ __ movdqa(Operand(dst, 0x10), xmm1);
+ if (direction == FORWARD) __ add(dst, Immediate(0x20));
+ // At most 31 bytes left to copy.
+ __ bind(&move_last_31);
+ __ test(count, Immediate(0x10));
+ __ j(zero, move_last_15);
+ if (direction == BACKWARD) __ sub(src, Immediate(0x10));
+ __ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
+ if (direction == FORWARD) __ add(src, Immediate(0x10));
+ if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
+ __ movdqa(Operand(dst, 0), xmm0);
+ if (direction == FORWARD) __ add(dst, Immediate(0x10));
+}
+
+
+void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
+ __ pop(esi);
+ __ pop(edi);
+ __ ret(0);
}
-OS::MemCopyFunction CreateMemCopyFunction() {
+#undef __
+#define __ masm.
+
+
+OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
- &actual_size,
- true));
- if (buffer == NULL) return &MemCopyWrapper;
+ byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+ if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // Generated code is put into a fixed, unmovable, buffer, and not into
+ // Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -203,185 +276,369 @@ OS::MemCopyFunction CreateMemCopyFunction() {
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
+ // When copying up to this many bytes, use special "small" handlers.
+ const size_t kSmallCopySize = 8;
+ // When copying up to this many bytes, use special "medium" handlers.
+ const size_t kMediumCopySize = 63;
+ // When the non-overlapping region of src and dst is smaller than this,
+ // use a more careful implementation (slightly slower).
+ const size_t kMinMoveDistance = 16;
+ // Note that these values are dictated by the implementation below;
+ // do not just change them and hope things will work!
+
int stack_offset = 0; // Update if we change the stack height.
- if (FLAG_debug_code) {
- __ cmp(Operand(esp, kSizeOffset + stack_offset),
- Immediate(OS::kMinComplexMemCopy));
- Label ok;
- __ j(greater_equal, &ok);
- __ int3();
- __ bind(&ok);
- }
+ Label backward, backward_much_overlap;
+ Label forward_much_overlap, small_size, medium_size, pop_and_return;
+ __ push(edi);
+ __ push(esi);
+ stack_offset += 2 * kPointerSize;
+ Register dst = edi;
+ Register src = esi;
+ Register count = ecx;
+ Register loop_count = edx;
+ __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
+ __ mov(src, Operand(esp, stack_offset + kSourceOffset));
+ __ mov(count, Operand(esp, stack_offset + kSizeOffset));
+
+ __ cmp(dst, src);
+ __ j(equal, &pop_and_return);
+
if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope enable(&masm, SSE2);
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
-
- __ movdqu(xmm0, Operand(src, 0));
- __ movdqu(Operand(dst, 0), xmm0);
- __ mov(edx, dst);
- __ and_(edx, 0xF);
- __ neg(edx);
- __ add(edx, Immediate(16));
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
-
- // edi is now aligned. Check if esi is also aligned.
- Label unaligned_source;
- __ test(src, Immediate(0x0F));
- __ j(not_zero, &unaligned_source);
+ CpuFeatureScope sse2_scope(&masm, SSE2);
+ __ prefetch(Operand(src, 0), 1);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ cmp(count, kMediumCopySize);
+ __ j(below_equal, &medium_size);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+
{
+ // |dst| is a lower address than |src|. Copy front-to-back.
+ Label unaligned_source, move_last_15, skip_last_move;
+ __ mov(eax, src);
+ __ sub(eax, dst);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &forward_much_overlap);
+ // Copy first 16 bytes.
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(Operand(dst, 0), xmm0);
+ // Determine distance to alignment: 16 - (dst & 0xF).
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ neg(edx);
+ __ add(edx, Immediate(16));
+ __ add(dst, edx);
+ __ add(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
- __ mov(edx, count);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop.
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqa(xmm0, Operand(src, 0x00));
- __ movdqa(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
-
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
- __ movdqa(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
+ __ bind(&move_last_15);
__ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
- }
- __ Align(16);
- {
// Copy loop for unaligned source and aligned destination.
- // If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
- __ mov(edx, ecx);
- Register loop_count = ecx;
- Register count = edx;
- __ shr(loop_count, 5);
- {
- // Main copy loop
- Label loop;
- __ bind(&loop);
- __ prefetch(Operand(src, 0x20), 1);
- __ movdqu(xmm0, Operand(src, 0x00));
- __ movdqu(xmm1, Operand(src, 0x10));
- __ add(src, Immediate(0x20));
-
- __ movdqa(Operand(dst, 0x00), xmm0);
- __ movdqa(Operand(dst, 0x10), xmm1);
- __ add(dst, Immediate(0x20));
-
- __ dec(loop_count);
- __ j(not_zero, &loop);
- }
+ MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
+ __ jmp(&move_last_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, last_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ mov_b(eax, Operand(src, 0));
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ dec(count);
+ __ bind(&forward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
+ FORWARD, MOVE_UNALIGNED);
+ __ bind(&last_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
- // At most 31 bytes to copy.
- Label move_less_16;
- __ test(count, Immediate(0x10));
- __ j(zero, &move_less_16);
+ {
+ // |dst| is a higher address than |src|. Copy backwards.
+ Label unaligned_source, move_first_15, skip_last_move;
+ __ bind(&backward);
+ // |dst| and |src| always point to the end of what's left to copy.
+ __ add(dst, count);
+ __ add(src, count);
+ __ mov(eax, dst);
+ __ sub(eax, src);
+ __ cmp(eax, kMinMoveDistance);
+ __ j(below, &backward_much_overlap);
+ // Copy last 16 bytes.
+ __ movdqu(xmm0, Operand(src, -0x10));
+ __ movdqu(Operand(dst, -0x10), xmm0);
+ // Find distance to alignment: dst & 0xF
+ __ mov(edx, dst);
+ __ and_(edx, 0xF);
+ __ sub(dst, edx);
+ __ sub(src, edx);
+ __ sub(count, edx);
+ // dst is now aligned. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ // Check if src is also aligned.
+ __ test(src, Immediate(0xF));
+ __ j(not_zero, &unaligned_source);
+ // Copy loop for aligned source and destination.
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
+ // At most 15 bytes to copy. Copy 16 bytes at beginning of string.
+ __ bind(&move_first_15);
+ __ and_(count, 0xF);
+ __ j(zero, &skip_last_move, Label::kNear);
+ __ sub(src, count);
+ __ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
- __ add(src, Immediate(0x10));
- __ movdqa(Operand(dst, 0), xmm0);
- __ add(dst, Immediate(0x10));
- __ bind(&move_less_16);
-
- // At most 15 bytes to copy. Copy 16 bytes at end of string.
- __ and_(count, 0x0F);
- __ movdqu(xmm0, Operand(src, count, times_1, -0x10));
- __ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
+ __ movdqu(Operand(dst, 0), xmm0);
+ __ bind(&skip_last_move);
+ MemMoveEmitPopAndReturn(&masm);
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // Copy loop for unaligned source and aligned destination.
+ __ bind(&unaligned_source);
+ MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
+ __ jmp(&move_first_15);
+
+ // Less than kMinMoveDistance offset between dst and src.
+ Label loop_until_aligned, first_15_much_overlap;
+ __ bind(&loop_until_aligned);
+ __ dec(src);
+ __ dec(dst);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ __ dec(count);
+ __ bind(&backward_much_overlap); // Entry point into this block.
+ __ test(dst, Immediate(0xF));
+ __ j(not_zero, &loop_until_aligned);
+ // dst is now aligned, src can't be. Main copy loop.
+ __ mov(loop_count, count);
+ __ shr(loop_count, 6);
+ MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
+ BACKWARD, MOVE_UNALIGNED);
+ __ bind(&first_15_much_overlap);
+ __ and_(count, 0xF);
+ __ j(zero, &pop_and_return);
+ // Small/medium handlers expect dst/src to point to the beginning.
+ __ sub(dst, count);
+ __ sub(src, count);
+ __ cmp(count, kSmallCopySize);
+ __ j(below_equal, &small_size);
+ __ jmp(&medium_size);
+ }
+ {
+ // Special handlers for 9 <= copy_size < 64. No assumptions about
+ // alignment or move distance, so all reads must be unaligned and
+ // must happen before any writes.
+ Label f9_16, f17_32, f33_48, f49_63;
+
+ __ bind(&f9_16);
+ __ movdbl(xmm0, Operand(src, 0));
+ __ movdbl(xmm1, Operand(src, count, times_1, -8));
+ __ movdbl(Operand(dst, 0), xmm0);
+ __ movdbl(Operand(dst, count, times_1, -8), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f17_32);
+ __ movdqu(xmm0, Operand(src, 0));
+ __ movdqu(xmm1, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f33_48);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f49_63);
+ __ movdqu(xmm0, Operand(src, 0x00));
+ __ movdqu(xmm1, Operand(src, 0x10));
+ __ movdqu(xmm2, Operand(src, 0x20));
+ __ movdqu(xmm3, Operand(src, count, times_1, -0x10));
+ __ movdqu(Operand(dst, 0x00), xmm0);
+ __ movdqu(Operand(dst, 0x10), xmm1);
+ __ movdqu(Operand(dst, 0x20), xmm2);
+ __ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
+ MemMoveEmitPopAndReturn(&masm);
+
+ medium_handlers = new size_t[4];
+ medium_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f9_16.pos();
+ medium_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f17_32.pos();
+ medium_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f33_48.pos();
+ medium_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f49_63.pos();
+
+ __ bind(&medium_size); // Entry point into this block.
+ __ mov(eax, count);
+ __ dec(eax);
+ __ shr(eax, 4);
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(eax, 3);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(eax, times_4,
+ reinterpret_cast<intptr_t>(medium_handlers)));
+ __ jmp(eax);
+ }
+ {
+ // Specialized copiers for copy_size <= 8 bytes.
+ Label f0, f1, f2, f3, f4, f5_8;
+ __ bind(&f0);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f1);
+ __ mov_b(eax, Operand(src, 0));
+ __ mov_b(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f2);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_w(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f3);
+ __ mov_w(eax, Operand(src, 0));
+ __ mov_b(edx, Operand(src, 2));
+ __ mov_w(Operand(dst, 0), eax);
+ __ mov_b(Operand(dst, 2), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f4);
+ __ mov(eax, Operand(src, 0));
+ __ mov(Operand(dst, 0), eax);
+ MemMoveEmitPopAndReturn(&masm);
+
+ __ bind(&f5_8);
+ __ mov(eax, Operand(src, 0));
+ __ mov(edx, Operand(src, count, times_1, -4));
+ __ mov(Operand(dst, 0), eax);
+ __ mov(Operand(dst, count, times_1, -4), edx);
+ MemMoveEmitPopAndReturn(&masm);
+
+ small_handlers = new size_t[9];
+ small_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f0.pos();
+ small_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f1.pos();
+ small_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f2.pos();
+ small_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f3.pos();
+ small_handlers[4] = reinterpret_cast<intptr_t>(buffer) + f4.pos();
+ small_handlers[5] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[6] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[7] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+ small_handlers[8] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
+
+ __ bind(&small_size); // Entry point into this block.
+ if (FLAG_debug_code) {
+ Label ok;
+ __ cmp(count, 8);
+ __ j(below_equal, &ok);
+ __ int3();
+ __ bind(&ok);
+ }
+ __ mov(eax, Operand(count, times_4,
+ reinterpret_cast<intptr_t>(small_handlers)));
+ __ jmp(eax);
}
-
} else {
- // SSE2 not supported. Unlikely to happen in practice.
- __ push(edi);
- __ push(esi);
- stack_offset += 2 * kPointerSize;
- __ cld();
- Register dst = edi;
- Register src = esi;
- Register count = ecx;
- __ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
- __ mov(src, Operand(esp, stack_offset + kSourceOffset));
- __ mov(count, Operand(esp, stack_offset + kSizeOffset));
-
- // Copy the first word.
- __ mov(eax, Operand(src, 0));
- __ mov(Operand(dst, 0), eax);
-
- // Increment src,dstso that dst is aligned.
- __ mov(edx, dst);
- __ and_(edx, 0x03);
- __ neg(edx);
- __ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
- __ add(dst, edx);
- __ add(src, edx);
- __ sub(count, edx);
- // edi is now aligned, ecx holds number of remaning bytes to copy.
-
- __ mov(edx, count);
- count = edx;
- __ shr(ecx, 2); // Make word count instead of byte count.
- __ rep_movs();
-
- // At most 3 bytes left to copy. Copy 4 bytes at end of string.
- __ and_(count, 3);
- __ mov(eax, Operand(src, count, times_1, -4));
- __ mov(Operand(dst, count, times_1, -4), eax);
-
- __ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
- __ pop(esi);
- __ pop(edi);
- __ ret(0);
+ // No SSE2.
+ Label forward;
+ __ cmp(count, 0);
+ __ j(equal, &pop_and_return);
+ __ cmp(dst, src);
+ __ j(above, &backward);
+ __ jmp(&forward);
+ {
+ // Simple forward copier.
+ Label forward_loop_1byte, forward_loop_4byte;
+ __ bind(&forward_loop_4byte);
+ __ mov(eax, Operand(src, 0));
+ __ sub(count, Immediate(4));
+ __ add(src, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ add(dst, Immediate(4));
+ __ bind(&forward); // Entry point.
+ __ cmp(count, 3);
+ __ j(above, &forward_loop_4byte);
+ __ bind(&forward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(count);
+ __ inc(src);
+ __ mov_b(Operand(dst, 0), eax);
+ __ inc(dst);
+ __ jmp(&forward_loop_1byte);
+ }
+ {
+ // Simple backward copier.
+ Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
+ __ bind(&backward);
+ __ add(src, count);
+ __ add(dst, count);
+ __ cmp(count, 3);
+ __ j(below_equal, &entry_shortcut);
+
+ __ bind(&backward_loop_4byte);
+ __ sub(src, Immediate(4));
+ __ sub(count, Immediate(4));
+ __ mov(eax, Operand(src, 0));
+ __ sub(dst, Immediate(4));
+ __ mov(Operand(dst, 0), eax);
+ __ cmp(count, 3);
+ __ j(above, &backward_loop_4byte);
+ __ bind(&backward_loop_1byte);
+ __ cmp(count, 0);
+ __ j(below_equal, &pop_and_return);
+ __ bind(&entry_shortcut);
+ __ dec(src);
+ __ dec(count);
+ __ mov_b(eax, Operand(src, 0));
+ __ dec(dst);
+ __ mov_b(Operand(dst, 0), eax);
+ __ jmp(&backward_loop_1byte);
+ }
}
+ __ bind(&pop_and_return);
+ MemMoveEmitPopAndReturn(&masm);
+
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
-
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
+ return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
+
#undef __
// -------------------------------------------------------------------------
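For reference, the size-class dispatch that CreateMemMoveFunction emits can be summarized by the following C++ sketch (the helper name and return convention are illustrative; the generated code jumps through the small_handlers/medium_handlers tables instead of returning an index):

#include <cstddef>

// Sketch of the handler selection above: sizes 0..8 index small_handlers
// directly, sizes 9..63 index medium_handlers via (size - 1) >> 4, and
// anything larger falls through to the 64-byte-chunk main loops (forward
// when dst < src, backward otherwise, byte-wise until aligned when the
// regions overlap by fewer than kMinMoveDistance bytes).
int MemMoveHandlerIndex(size_t size, bool* small, bool* medium) {
  const size_t kSmallCopySize = 8;
  const size_t kMediumCopySize = 63;
  *small = size <= kSmallCopySize;
  *medium = !*small && size <= kMediumCopySize;
  if (*small) return static_cast<int>(size);              // small_handlers[0..8]
  if (*medium) return static_cast<int>((size - 1) >> 4);  // medium_handlers[0..3]
  return -1;                                               // main copy loop
}

For example, a 40-byte copy yields medium index 2, i.e. the 33-48 byte handler, which issues all three unaligned reads before any write so overlapping regions stay safe.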
diff --git a/deps/v8/src/ia32/cpu-ia32.cc b/deps/v8/src/ia32/cpu-ia32.cc
index 9eabb2a96..2d83cab2d 100644
--- a/deps/v8/src/ia32/cpu-ia32.cc
+++ b/deps/v8/src/ia32/cpu-ia32.cc
@@ -79,6 +79,8 @@ void CPU::DebugBreak() {
// instead
// __asm { int 3 }
__debugbreak();
+#elif defined(__native_client__)
+ asm("hlt");
#else
asm("int $3");
#endif
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index 8cdcf9965..f0436225c 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -90,9 +90,9 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
- memcpy(new_reloc->GetDataStartAddress() + padding,
- code->relocation_info()->GetDataStartAddress(),
- reloc_length);
+ OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
+ code->relocation_info()->GetDataStartAddress(),
+ reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
@@ -177,7 +177,8 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
- memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
+ OS::MemMove(
+ code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
@@ -211,41 +212,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x13;
+static const byte kJnsOffset = 0x11;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
-
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
+// The back edge bookkeeping code matches the pattern:
+//
+// sub <profiling_counter>, <delta>
+// jns ok
+// call <interrupt stub>
+// ok:
+//
+// The patched back edge looks like this:
+//
+// sub <profiling_counter>, <delta> ;; Not changed
+// nop
+// nop
+// call <on-stack replacement>
+// ok:
+
+void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(!InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ // Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // sub <profiling_counter>, <delta>
- // jns ok
- // call <stack guard>
- // test eax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // sub <profiling_counter>, <delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test eax, <loop nesting depth>
- // ok:
-
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
+ // Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -254,27 +253,48 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
}
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ // Restore the original jump.
Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(replacement_code->entry(),
- Assembler::target_address_at(call_target_address));
-
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
+ // Restore the original call address.
Assembler::set_target_address_at(call_target_address,
- check_code->entry());
+ interrupt_code->entry());
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, interrupt_code);
+}
+
+
+#ifdef DEBUG
+bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (*(call_target_address - 3) == kNopByteOne) {
+ ASSERT_EQ(replacement_code->entry(),
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+ return true;
+ } else {
+ ASSERT_EQ(interrupt_code->entry(),
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ return false;
+ }
}
+#endif // DEBUG
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
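For reference, the two byte patterns the patch/revert/inspect functions distinguish can be summarized by this C++ sketch (function name is illustrative; constants mirror those defined above, and call_target_address points at the call's rel32, i.e. pc_after - kIntSize):

#include <cstdint>

const uint8_t kJnsInstructionByte = 0x79;
const uint8_t kJnsOffsetByte = 0x11;
const uint8_t kCallInstructionByte = 0xe8;
const uint8_t kNopByteOneSketch = 0x66;
const uint8_t kNopByteTwoSketch = 0x90;

// Patched:   66 90 e8 <on-stack replacement>   (jns replaced by a 2-byte nop)
// Unpatched: 79 11 e8 <interrupt stub>
bool BackEdgeIsPatchedSketch(const uint8_t* call_target_address) {
  if (call_target_address[-1] != kCallInstructionByte) return false;  // call opcode
  return call_target_address[-3] == kNopByteOneSketch &&
         call_target_address[-2] == kNopByteTwoSketch;
}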
diff --git a/deps/v8/src/ia32/frames-ia32.h b/deps/v8/src/ia32/frames-ia32.h
index c59b1664a..6223748d6 100644
--- a/deps/v8/src/ia32/frames-ia32.h
+++ b/deps/v8/src/ia32/frames-ia32.h
@@ -60,18 +60,6 @@ const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index efbdf1354..19989b1c6 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -164,6 +164,8 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -351,13 +353,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
// the deoptimization input data found in the optimized code.
RecordBackEdge(stmt->OsrEntryId());
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
EmitProfilingCounterReset();
__ bind(&ok);
@@ -1214,7 +1209,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ push(Immediate(info));
__ CallStub(&stub);
} else {
@@ -1670,24 +1665,33 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
- __ push(Immediate(Smi::FromInt(expr->literal_index())));
- __ push(Immediate(constant_elements));
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ push(Immediate(Smi::FromInt(expr->literal_index())));
+ __ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@@ -1704,6 +1708,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
+ __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+ __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
+ __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
+ __ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index 4d23aef15..1dcad043e 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -135,7 +135,7 @@ void LCodeGen::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
@@ -366,7 +366,20 @@ bool LCodeGen::GenerateBody() {
Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
}
}
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ FlushX87StackIfNecessary(instr);
+ }
+
instr->CompileToNative(this);
+
+ if (!CpuFeatures::IsSupported(SSE2)) {
+ ASSERT(!instr->HasDoubleRegisterResult() || x87_stack_depth_ == 1);
+
+ if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+ __ VerifyX87StackDepth(x87_stack_depth_);
+ }
+ }
}
}
EnsureSpaceForLazyDeopt();
@@ -521,6 +534,52 @@ bool LCodeGen::IsX87TopOfStack(LOperand* op) const {
}
+void LCodeGen::ReadX87Operand(Operand dst) {
+ ASSERT(x87_stack_depth_ == 1);
+ __ fst_d(dst);
+}
+
+
+void LCodeGen::PushX87DoubleOperand(Operand src) {
+ ASSERT(x87_stack_depth_ == 0);
+ x87_stack_depth_++;
+ __ fld_d(src);
+}
+
+
+void LCodeGen::PushX87FloatOperand(Operand src) {
+ ASSERT(x87_stack_depth_ == 0);
+ x87_stack_depth_++;
+ __ fld_s(src);
+}
+
+
+void LCodeGen::PopX87() {
+ ASSERT(x87_stack_depth_ == 1);
+ x87_stack_depth_--;
+ __ fstp(0);
+}
+
+
+void LCodeGen::CurrentInstructionReturnsX87Result() {
+ ASSERT(x87_stack_depth_ <= 1);
+ if (x87_stack_depth_ == 0) {
+ x87_stack_depth_ = 1;
+ }
+}
+
+
+void LCodeGen::FlushX87StackIfNecessary(LInstruction* instr) {
+ if (x87_stack_depth_ > 0) {
+ if ((instr->ClobbersDoubleRegisters() ||
+ instr->HasDoubleRegisterResult()) &&
+ !instr->HasDoubleRegisterInput()) {
+ PopX87();
+ }
+ }
+}
+
+
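The helpers above maintain a one-deep model of the x87 register stack between Lithium instructions; a minimal C++ sketch of that bookkeeping (the flag parameters mirror the LInstruction predicates queried in GenerateBody, and the struct is purely illustrative):

// Sketch only: at most one value may live on the x87 stack between
// instructions (x87_stack_depth_ <= 1); a stale top-of-stack is popped
// before an instruction that clobbers or produces a double without
// consuming one.
struct X87StackModelSketch {
  int depth = 0;
  void Push() { ++depth; }   // fld_d / fld_s / fild_s
  void Pop() { --depth; }    // fstp(0)
  void FlushIfNecessary(bool clobbers_doubles,
                        bool has_double_result,
                        bool has_double_input) {
    if (depth > 0 && (clobbers_doubles || has_double_result) &&
        !has_double_input) {
      Pop();
    }
  }
};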
Register LCodeGen::ToRegister(LOperand* op) const {
ASSERT(op->IsRegister());
return ToRegister(op->index());
@@ -846,6 +905,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(
void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
ASSERT(environment->HasBeenRegistered());
+ // It's an error to deoptimize with the x87 fp stack in use.
+ ASSERT(x87_stack_depth_ == 0);
int id = environment->deoptimization_index();
ASSERT(info()->IsOptimizing() || info()->IsStub());
Deoptimizer::BailoutType bailout_type = info()->IsStub()
@@ -858,8 +919,8 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
return;
}
- if (FLAG_deopt_every_n_times != 0) {
- Handle<SharedFunctionInfo> shared(info_->shared_info());
+ if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+ Handle<SharedFunctionInfo> shared(info()->shared_info());
Label no_deopt;
__ pushfd();
__ push(eax);
@@ -1689,40 +1750,46 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
- ASSERT(instr->result()->IsDoubleRegister());
- XMMRegister res = ToDoubleRegister(instr->result());
double v = instr->value();
- // Use xor to produce +0.0 in a fast and compact way, but avoid to
- // do so if the constant is -0.0.
- if (BitCast<uint64_t, double>(v) == 0) {
- __ xorps(res, res);
+ uint64_t int_val = BitCast<uint64_t, double>(v);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ __ push(Immediate(lower));
+ __ push(Immediate(upper));
+ PushX87DoubleOperand(Operand(esp, 0));
+ __ add(Operand(esp), Immediate(kDoubleSize));
+ CurrentInstructionReturnsX87Result();
} else {
- Register temp = ToRegister(instr->temp());
- uint64_t int_val = BitCast<uint64_t, double>(v);
- int32_t lower = static_cast<int32_t>(int_val);
- int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
- if (CpuFeatures::IsSupported(SSE4_1)) {
- CpuFeatureScope scope1(masm(), SSE2);
- CpuFeatureScope scope2(masm(), SSE4_1);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(res, Operand(temp));
- __ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
+ CpuFeatureScope scope1(masm(), SSE2);
+ ASSERT(instr->result()->IsDoubleRegister());
+ XMMRegister res = ToDoubleRegister(instr->result());
+ if (int_val == 0) {
+ __ xorps(res, res);
+ } else {
+ Register temp = ToRegister(instr->temp());
+ if (CpuFeatures::IsSupported(SSE4_1)) {
+ CpuFeatureScope scope2(masm(), SSE4_1);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(res, Operand(temp));
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ } else {
+ __ xorps(res, res);
+ __ Set(temp, Immediate(upper));
+ __ pinsrd(res, Operand(temp), 1);
+ }
} else {
- __ xorps(res, res);
__ Set(temp, Immediate(upper));
- __ pinsrd(res, Operand(temp), 1);
- }
- } else {
- CpuFeatureScope scope(masm(), SSE2);
- __ Set(temp, Immediate(upper));
- __ movd(res, Operand(temp));
- __ psllq(res, 32);
- if (lower != 0) {
- __ Set(temp, Immediate(lower));
- __ movd(xmm0, Operand(temp));
- __ por(res, xmm0);
+ __ movd(res, Operand(temp));
+ __ psllq(res, 32);
+ if (lower != 0) {
+ __ Set(temp, Immediate(lower));
+ __ movd(xmm0, Operand(temp));
+ __ por(res, xmm0);
+ }
}
}
}
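The lower/upper words computed above are simply the two halves of the constant's IEEE-754 bit pattern; a self-contained C++ sketch of that split (memcpy stands in for BitCast, and the function name is illustrative):

#include <cstdint>
#include <cstring>

// Split a double constant into the 32-bit immediates used by the non-SSE2
// path above.
void SplitDoubleConstant(double v, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &v, sizeof(bits));        // BitCast<uint64_t, double>(v)
  *lower = static_cast<int32_t>(bits);         // low 32 bits of the pattern
  *upper = static_cast<int32_t>(bits >> 32);   // high 32 bits (sign/exponent)
}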
@@ -1951,6 +2018,9 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
break;
case Token::DIV:
__ divsd(left, right);
+ // Don't delete this mov. It may improve performance on some CPUs
+ // when there is a mulsd depending on the result.
+ __ movaps(left, left);
break;
case Token::MOD: {
// Pass two doubles as arguments on the stack.
@@ -3158,16 +3228,16 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movss(result, operand);
__ cvtss2sd(result, result);
} else {
- __ fld_s(operand);
- HandleX87FPReturnValue(instr);
+ PushX87FloatOperand(operand);
+ CurrentInstructionReturnsX87Result();
}
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
__ movdbl(ToDoubleRegister(instr->result()), operand);
} else {
- __ fld_d(operand);
- HandleX87FPReturnValue(instr);
+ PushX87DoubleOperand(operand);
+ CurrentInstructionReturnsX87Result();
}
} else {
Register result(ToRegister(instr->result()));
@@ -3212,29 +3282,6 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
}
-void LCodeGen::HandleX87FPReturnValue(LInstruction* instr) {
- if (IsX87TopOfStack(instr->result())) {
- // Return value is already on stack. If the value has no uses, then
- // pop it off the FP stack. Otherwise, make sure that there are enough
- // copies of the value on the stack to feed all of the usages, e.g.
- // when the following instruction uses the return value in multiple
- // inputs.
- int count = instr->hydrogen_value()->UseCount();
- if (count == 0) {
- __ fstp(0);
- } else {
- count--;
- ASSERT(count <= 7);
- while (count-- > 0) {
- __ fld(0);
- }
- }
- } else {
- __ fstp_d(ToOperand(instr->result()));
- }
-}
-
-
void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
int offset = FixedDoubleArray::kHeaderSize - kHeapObjectTag +
@@ -3261,8 +3308,8 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
__ movdbl(result, double_load_operand);
} else {
- __ fld_d(double_load_operand);
- HandleX87FPReturnValue(instr);
+ PushX87DoubleOperand(double_load_operand);
+ CurrentInstructionReturnsX87Result();
}
}
@@ -3598,7 +3645,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
factory()->heap_number_map());
@@ -3651,7 +3698,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ test(input_reg, Operand(input_reg));
Label is_positive;
@@ -3663,19 +3710,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
ASSERT(instr->value()->Equals(instr->result()));
@@ -3702,7 +3748,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
@@ -3828,7 +3874,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3981,7 +4027,7 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
CpuFeatureScope scope(masm(), SSE2);
ASSERT(instr->value()->Equals(instr->result()));
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -4024,7 +4070,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
@@ -4032,7 +4078,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
@@ -4040,7 +4086,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
@@ -4048,36 +4094,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->function()).is(edi));
@@ -4311,12 +4327,21 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
0,
instr->additional_index()));
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- CpuFeatureScope scope(masm(), SSE2);
- __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
- __ movss(operand, xmm0);
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ cvtsd2ss(xmm0, ToDoubleRegister(instr->value()));
+ __ movss(operand, xmm0);
+ } else {
+ __ fld(0);
+ __ fstp_s(operand);
+ }
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatureScope scope(masm(), SSE2);
- __ movdbl(operand, ToDoubleRegister(instr->value()));
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ __ movdbl(operand, ToDoubleRegister(instr->value()));
+ } else {
+ __ fst_d(operand);
+ }
} else {
Register value = ToRegister(instr->value());
switch (elements_kind) {
@@ -4351,21 +4376,8 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), SSE2);
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
-
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
Operand double_store_operand = BuildFastArrayOperand(
instr->elements(),
instr->key(),
@@ -4373,7 +4385,68 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
FAST_DOUBLE_ELEMENTS,
FixedDoubleArray::kHeaderSize - kHeapObjectTag,
instr->additional_index());
- __ movdbl(double_store_operand, value);
+
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister value = ToDoubleRegister(instr->value());
+
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
+
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
+ }
+
+ __ movdbl(double_store_operand, value);
+ } else {
+ // Can't use SSE2 in the serializer
+ if (instr->hydrogen()->IsConstantHoleStore()) {
+ // This means we should store the (double) hole. No floating point
+ // registers required.
+ double nan_double = FixedDoubleArray::hole_nan_as_double();
+ uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+ int32_t lower = static_cast<int32_t>(int_val);
+ int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
+
+ __ mov(double_store_operand, Immediate(lower));
+ Operand double_store_operand2 = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag + kPointerSize,
+ instr->additional_index());
+ __ mov(double_store_operand2, Immediate(upper));
+ } else {
+ Label no_special_nan_handling;
+ ASSERT(x87_stack_depth_ > 0);
+
+ if (instr->NeedsCanonicalization()) {
+ __ fld(0);
+ __ fld(0);
+ __ FCmp();
+
+ __ j(parity_odd, &no_special_nan_handling);
+ __ sub(esp, Immediate(kDoubleSize));
+ __ fst_d(MemOperand(esp, 0));
+ __ cmp(MemOperand(esp, sizeof(kHoleNanLower32)),
+ Immediate(kHoleNanUpper32));
+ __ add(esp, Immediate(kDoubleSize));
+ Label canonicalize;
+ __ j(not_equal, &canonicalize);
+ __ jmp(&no_special_nan_handling);
+ __ bind(&canonicalize);
+ __ fstp(0);
+ __ fld_d(Operand::StaticVariable(canonical_nan_reference));
+ }
+
+ __ bind(&no_special_nan_handling);
+ __ fst_d(double_store_operand);
+ }
+ }
}
@@ -4805,9 +4878,6 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ ucomisd(input_reg, input_reg);
} else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
__ fld(0);
__ fld(0);
__ FCmp();
@@ -4829,6 +4899,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ j(not_equal, &canonicalize);
__ add(esp, Immediate(kDoubleSize));
__ mov(reg, factory()->the_hole_value());
+ if (!use_sse2) {
+ __ fstp(0);
+ }
__ jmp(&done);
__ bind(&canonicalize);
__ add(esp, Immediate(kDoubleSize));
@@ -4858,10 +4931,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
__ movdbl(FieldOperand(reg, HeapNumber::kValueOffset), input_reg);
} else {
- if (!IsX87TopOfStack(instr->value())) {
- __ fld_d(ToOperand(instr->value()));
- }
- __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+ __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
}
__ bind(&done);
}
@@ -4909,6 +4979,79 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
}
+void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
+ Register temp_reg,
+ bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode) {
+ Label load_smi, done;
+
+ if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+ // Smi check.
+ __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ if (deoptimize_on_undefined) {
+ DeoptimizeIf(not_equal, env);
+ } else {
+ Label heap_number;
+ __ j(equal, &heap_number, Label::kNear);
+
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, env);
+
+ // Convert undefined to NaN.
+ ExternalReference nan =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ fld_d(Operand::StaticVariable(nan));
+ __ jmp(&done, Label::kNear);
+ __ bind(&heap_number);
+ }
+ // Heap number to x87 conversion.
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ if (deoptimize_on_minus_zero) {
+ __ fldz();
+ __ FCmp();
+ __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+ __ j(not_zero, &done, Label::kNear);
+
+ // Use general purpose registers to check if we have -0.0
+ __ mov(temp_reg, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ test(temp_reg, Immediate(HeapNumber::kSignMask));
+ __ j(zero, &done, Label::kNear);
+
+ // Pop FPU stack before deoptimizing.
+ __ fstp(0);
+ DeoptimizeIf(not_zero, env);
+ }
+ __ jmp(&done, Label::kNear);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_OR_HOLE) {
+ __ test(input_reg, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, env);
+ } else if (mode == NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE) {
+ __ test(input_reg, Immediate(kSmiTagMask));
+ __ j(zero, &load_smi);
+ ExternalReference hole_nan_reference =
+ ExternalReference::address_of_the_hole_nan();
+ __ fld_d(Operand::StaticVariable(hole_nan_reference));
+ __ jmp(&done, Label::kNear);
+ } else {
+ ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+ }
+
+ __ bind(&load_smi);
+ __ SmiUntag(input_reg); // Untag smi before converting to float.
+ __ push(input_reg);
+ __ fild_s(Operand(esp, 0));
+ __ pop(input_reg);
+ __ SmiTag(input_reg); // Retag smi.
+ __ bind(&done);
+}
+
+
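The load_smi tail above converts an untagged smi with fild_s and then re-tags the register so later uses still see the smi; in C terms the conversion itself is just an int32-to-double cast (sketch only; the smi tag size is 1 on ia32):

#include <cstdint>

// Sketch of the smi path in EmitNumberUntagDNoSSE2.
double SmiToDoubleSketch(int32_t tagged_smi) {
  int32_t untagged = tagged_smi >> 1;       // SmiUntag: drop the tag bit
  return static_cast<double>(untagged);     // fild_s of the pushed word
}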
void LCodeGen::EmitNumberUntagD(Register input_reg,
Register temp_reg,
XMMRegister result_reg,
@@ -5021,7 +5164,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ fisttp_d(Operand(esp, 0));
__ mov(input_reg, Operand(esp, 0)); // Low word of answer is the result.
__ add(Operand(esp), Immediate(kDoubleSize));
- } else {
+ } else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
__ movdbl(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
@@ -5035,6 +5178,8 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ ucomisd(xmm_temp, xmm0);
DeoptimizeIf(not_equal, instr->environment());
DeoptimizeIf(parity_even, instr->environment()); // NaN.
+ } else {
+ UNREACHABLE();
}
} else if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope scope(masm(), SSE2);
@@ -5079,18 +5224,169 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
LOperand* input = instr->value();
ASSERT(input->IsRegister());
- ASSERT(input->Equals(instr->result()));
-
Register input_reg = ToRegister(input);
+ ASSERT(input_reg.is(ToRegister(instr->result())));
DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
- // Smi check.
__ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg);
+ __ bind(deferred->exit());
+}
- // Smi to int32 conversion
- __ SmiUntag(input_reg); // Untag smi.
+void LCodeGen::DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
+ Label done, heap_number;
+ Register result_reg = ToRegister(instr->result());
+ Register input_reg = ToRegister(instr->value());
+
+ // Heap number map check.
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ // Check for undefined. Undefined is converted to zero for truncating
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ __ RecordComment("Deferred TaggedToI: cannot truncate");
+ DeoptimizeIf(not_equal, instr->environment());
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kFar);
+ __ bind(&heap_number);
+
+ // Surprisingly, all of this crazy bit manipulation is considerably
+ // faster than using the built-in x86 CPU conversion functions (about 6x).
+ Label right_exponent, adjust_bias, zero_result;
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ if (instr->truncating()) {
+ __ j(zero, &zero_result);
+ } else {
+ __ j(not_zero, &adjust_bias);
+ __ test(scratch, Immediate(HeapNumber::kMantissaMask));
+ DeoptimizeIf(not_zero, instr->environment());
+ __ cmp(FieldOperand(input_reg, HeapNumber::kMantissaOffset), Immediate(0));
+ DeoptimizeIf(not_equal, instr->environment());
+ __ bind(&adjust_bias);
+ }
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
+ if (!instr->truncating()) {
+ DeoptimizeIf(negative, instr->environment());
+ } else {
+ __ j(negative, &zero_result);
+ }
+
+ // Get the second half of the double. For some exponents we don't
+ // actually need this because the bits get shifted out again, but
+ // it's probably slower to test than just to do it.
+ Register scratch3 = ToRegister(instr->scratch3());
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ xor_(result_reg, result_reg);
+
+ const uint32_t non_int32_exponent = 31;
+ __ cmp(scratch2, Immediate(non_int32_exponent));
+ // If we have a match of the int32 exponent then skip some logic.
+ __ j(equal, &right_exponent, Label::kNear);
+ // If the number doesn't fit in an int32, deopt.
+ DeoptimizeIf(greater, instr->environment());
+
+ // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
+ // < 31.
+ __ mov(result_reg, Immediate(31));
+ __ sub(result_reg, scratch2);
+
+ __ bind(&right_exponent);
+
+ // Save off exponent for negative check later.
+ __ mov(scratch2, scratch);
+
+ // Here result_reg is the shift, scratch is the exponent word.
+ // Get the top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1.
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up the mantissa bits to take up the space the exponent used to
+ // take. We have kExponentShift + 1 significant bits in the low end of the
+ // word. Shift them to the top bits.
+ const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 1;
+ __ shl(scratch, shift_distance);
+ if (!instr->truncating()) {
+ // If not truncating, a non-zero value in the bottom 22 bits means a
+ // non-integral value --> trigger a deopt.
+ __ test(scratch3, Immediate((1 << (32 - shift_distance)) - 1));
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+ // Shift down 22 bits to get the most significant 10 bits of the low
+ // mantissa word.
+ __ shr(scratch3, 32 - shift_distance);
+ __ or_(scratch3, scratch);
+ if (!instr->truncating()) {
+ // If not truncating, a non-zero value in the bits that will be shifted away
+ // when adjusting the exponent means rounding --> deopt.
+ __ mov(scratch, 0x1);
+ ASSERT(result_reg.is(ecx));
+ __ shl_cl(scratch);
+ __ dec(scratch);
+ __ test(scratch3, scratch);
+ DeoptimizeIf(not_equal, instr->environment());
+ }
+ // Move down according to the exponent.
+ ASSERT(result_reg.is(ecx));
+ __ shr_cl(scratch3);
+ // Now the unsigned 32-bit answer is in scratch3. We need to move it to
+ // result_reg and we may need to fix the sign.
+ Label negative_result;
+ __ xor_(result_reg, result_reg);
+ __ cmp(scratch2, result_reg);
+ __ j(less, &negative_result, Label::kNear);
+ __ cmp(scratch3, result_reg);
+ __ mov(result_reg, scratch3);
+ // If the result is > MAX_INT, result doesn't fit in signed 32-bit --> deopt.
+ DeoptimizeIf(less, instr->environment());
+ __ jmp(&done, Label::kNear);
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done, Label::kNear);
+ __ bind(&negative_result);
+ __ sub(result_reg, scratch3);
+ if (!instr->truncating()) {
+ // -0.0 triggers a deopt.
+ DeoptimizeIf(zero, instr->environment());
+ }
+ // If the negative subtraction overflows into a positive number, there was an
+ // overflow --> deopt.
+ DeoptimizeIf(positive, instr->environment());
+ __ bind(&done);
+}
+
+
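The deferred path above performs the double-to-int32 conversion with integer operations on the heap number's two words; a hedged, self-contained C++ approximation of the truncating case (the function name is illustrative, and out-of-range inputs are mapped to 0 here only to keep the sketch total, whereas the generated code deoptimizes or falls into its zero/negative paths instead):

#include <cstdint>
#include <cstring>

// Truncating double -> int32 using only integer arithmetic, in the spirit of
// DoDeferredTaggedToINoSSE2 above. Values with |x| >= 2^31 are left to the
// deoptimization path in the real code.
int32_t TruncateDoubleToInt32Sketch(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  const int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0 || exponent > 30) return 0;   // |input| < 1 or out of range
  // Re-insert the implicit 1 and shift so that bit `exponent` holds it.
  const uint64_t mantissa = (bits & ((1ULL << 52) - 1)) | (1ULL << 52);
  const int32_t result =
      static_cast<int32_t>(mantissa >> (52 - exponent));
  return (bits >> 63) ? -result : result;        // apply the sign bit
}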
+void LCodeGen::DoTaggedToINoSSE2(LTaggedToINoSSE2* instr) {
+ class DeferredTaggedToINoSSE2: public LDeferredCode {
+ public:
+ DeferredTaggedToINoSSE2(LCodeGen* codegen, LTaggedToINoSSE2* instr)
+ : LDeferredCode(codegen), instr_(instr) { }
+ virtual void Generate() { codegen()->DoDeferredTaggedToINoSSE2(instr_); }
+ virtual LInstruction* instr() { return instr_; }
+ private:
+ LTaggedToINoSSE2* instr_;
+ };
+
+ LOperand* input = instr->value();
+ ASSERT(input->IsRegister());
+ Register input_reg = ToRegister(input);
+ ASSERT(input_reg.is(ToRegister(instr->result())));
+
+ DeferredTaggedToINoSSE2* deferred =
+ new(zone()) DeferredTaggedToINoSSE2(this, instr);
+
+ // Smi check.
+ __ JumpIfNotSmi(input_reg, deferred->entry());
+ __ SmiUntag(input_reg); // Untag smi.
__ bind(deferred->exit());
}
@@ -5103,32 +5399,31 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
LOperand* result = instr->result();
ASSERT(result->IsDoubleRegister());
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatureScope scope(masm(), SSE2);
- Register input_reg = ToRegister(input);
- XMMRegister result_reg = ToDoubleRegister(result);
-
- bool deoptimize_on_minus_zero =
- instr->hydrogen()->deoptimize_on_minus_zero();
- Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
-
- NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
- HValue* value = instr->hydrogen()->value();
- if (value->type().IsSmi()) {
- if (value->IsLoadKeyed()) {
- HLoadKeyed* load = HLoadKeyed::cast(value);
- if (load->UsesMustHandleHole()) {
- if (load->hole_mode() == ALLOW_RETURN_HOLE) {
- mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
- } else {
- mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
- }
+ Register input_reg = ToRegister(input);
+ bool deoptimize_on_minus_zero =
+ instr->hydrogen()->deoptimize_on_minus_zero();
+ Register temp_reg = deoptimize_on_minus_zero ? ToRegister(temp) : no_reg;
+
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED;
+ HValue* value = instr->hydrogen()->value();
+ if (value->type().IsSmi()) {
+ if (value->IsLoadKeyed()) {
+ HLoadKeyed* load = HLoadKeyed::cast(value);
+ if (load->UsesMustHandleHole()) {
+ if (load->hole_mode() == ALLOW_RETURN_HOLE) {
+ mode = NUMBER_CANDIDATE_IS_SMI_CONVERT_HOLE;
} else {
- mode = NUMBER_CANDIDATE_IS_SMI;
+ mode = NUMBER_CANDIDATE_IS_SMI_OR_HOLE;
}
+ } else {
+ mode = NUMBER_CANDIDATE_IS_SMI;
}
}
+ }
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(masm(), SSE2);
+ XMMRegister result_reg = ToDoubleRegister(result);
EmitNumberUntagD(input_reg,
temp_reg,
result_reg,
@@ -5137,7 +5432,13 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
instr->environment(),
mode);
} else {
- UNIMPLEMENTED();
+ EmitNumberUntagDNoSSE2(input_reg,
+ temp_reg,
+ instr->hydrogen()->deoptimize_on_undefined(),
+ deoptimize_on_minus_zero,
+ instr->environment(),
+ mode);
+ CurrentInstructionReturnsX87Result();
}
}
@@ -5409,13 +5710,133 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
__ bind(&is_smi);
__ SmiUntag(input_reg);
__ ClampUint8(input_reg);
+ __ bind(&done);
+}
+
+
+void LCodeGen::DoClampTToUint8NoSSE2(LClampTToUint8NoSSE2* instr) {
+ Register input_reg = ToRegister(instr->unclamped());
+ Register result_reg = ToRegister(instr->result());
+ Register scratch = ToRegister(instr->scratch());
+ Register scratch2 = ToRegister(instr->scratch2());
+ Register scratch3 = ToRegister(instr->scratch3());
+ Label is_smi, done, heap_number, valid_exponent,
+ largest_value, zero_result, maybe_nan_or_infinity;
+ __ JumpIfSmi(input_reg, &is_smi);
+
+ // Check for heap number
+ __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+ factory()->heap_number_map());
+ __ j(equal, &heap_number, Label::kFar);
+
+ // Check for undefined. Undefined is converted to zero for clamping
+ // conversions.
+ __ cmp(input_reg, factory()->undefined_value());
+ DeoptimizeIf(not_equal, instr->environment());
+ __ jmp(&zero_result);
+
+ // Heap number
+ __ bind(&heap_number);
+
+ // Surprisingly, all of the hand-crafted bit manipulation below is much
+ // faster than the x86 FPU's built-in instruction, especially since
+ // "banker's rounding" would be very expensive to do on top of that.
+
+ // Get exponent word.
+ __ mov(scratch, FieldOperand(input_reg, HeapNumber::kExponentOffset));
+ __ mov(scratch3, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+
+ // Test for negative values --> clamp to zero
+ __ test(scratch, scratch);
+ __ j(negative, &zero_result);
+
+ // Get exponent alone in scratch2.
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, HeapNumber::kExponentMask);
+ __ shr(scratch2, HeapNumber::kExponentShift);
+ __ j(zero, &zero_result);
+ __ sub(scratch2, Immediate(HeapNumber::kExponentBias - 1));
+ __ j(negative, &zero_result);
+
+ const uint32_t non_int8_exponent = 7;
+ __ cmp(scratch2, Immediate(non_int8_exponent + 1));
+ // If the exponent is too big, check for special values.
+ __ j(greater, &maybe_nan_or_infinity, Label::kNear);
+
+ __ bind(&valid_exponent);
+ // Exponent word in scratch, exponent in scratch2. We know that 0 <= exponent
+ // < 7. The shift bias is the number of bits to shift the mantissa such that
+ // with an exponent of 7 the top-most one ends up in bit 30, allowing
+ // detection of the rounding overflow of 255.5 to 256 (bit 31 goes from 0
+ // to 1).
+ int shift_bias = (30 - HeapNumber::kExponentShift) - 7 - 1;
+ __ lea(result_reg, MemOperand(scratch2, shift_bias));
+ // Here result_reg (ecx) is the shift, scratch is the exponent word. Get the
+ // top bits of the mantissa.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ // Put back the implicit 1 of the mantissa
+ __ or_(scratch, 1 << HeapNumber::kExponentShift);
+ // Shift up to round
+ __ shl_cl(scratch);
+ // Use "banker's rounding" to spec: If fractional part of number is 0.5, then
+ // use the bit in the "ones" place and add it to the "halves" place, which has
+ // the effect of rounding to even.
+ __ mov(scratch2, scratch);
+ const uint32_t one_half_bit_shift = 30 - sizeof(uint8_t) * 8;
+ const uint32_t one_bit_shift = one_half_bit_shift + 1;
+ __ and_(scratch2, Immediate((1 << one_bit_shift) - 1));
+ __ cmp(scratch2, Immediate(1 << one_half_bit_shift));
+ Label no_round;
+ __ j(less, &no_round);
+ Label round_up;
+ __ mov(scratch2, Immediate(1 << one_half_bit_shift));
+ __ j(greater, &round_up);
+ __ test(scratch3, scratch3);
+ __ j(not_zero, &round_up);
+ __ mov(scratch2, scratch);
+ __ and_(scratch2, Immediate(1 << one_bit_shift));
+ __ shr(scratch2, 1);
+ __ bind(&round_up);
+ __ add(scratch, scratch2);
+ __ j(overflow, &largest_value);
+ __ bind(&no_round);
+ __ shr(scratch, 23);
+ __ mov(result_reg, scratch);
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&maybe_nan_or_infinity);
+ // Check for NaN/Infinity; all other values map to 255.
+ __ cmp(scratch2, Immediate(HeapNumber::kInfinityOrNanExponent + 1));
+ __ j(not_equal, &largest_value, Label::kNear);
+
+ // Check for NaN, which differs from Infinity in that at least one mantissa
+ // bit is set.
+ __ and_(scratch, HeapNumber::kMantissaMask);
+ __ or_(scratch, FieldOperand(input_reg, HeapNumber::kMantissaOffset));
+ __ j(not_zero, &zero_result); // M!=0 --> NaN
+ // Infinity -> Fall through to map to 255.
+
+ __ bind(&largest_value);
+ __ mov(result_reg, Immediate(255));
+ __ jmp(&done, Label::kNear);
+
+ __ bind(&zero_result);
+ __ xor_(result_reg, result_reg);
+ __ jmp(&done);
+
+ // smi
+ __ bind(&is_smi);
+ if (!input_reg.is(result_reg)) {
+ __ mov(result_reg, input_reg);
+ }
+ __ SmiUntag(result_reg);
+ __ ClampUint8(result_reg);
__ bind(&done);
}
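
As a cross-check on the bit manipulation above, here is a minimal reference model of the clamping behaviour it implements (a sketch built on the standard library, not the generated code; it assumes the default round-to-nearest-even mode for std::nearbyint, and ClampDoubleToUint8Reference is a hypothetical name):

    #include <cmath>
    #include <cstdint>

    // Clamp a double to [0, 255] with round-half-to-even, matching the code
    // above: NaN and non-positive values clamp to 0, Infinity and anything
    // at or above 255 clamps to 255, everything else rounds to even.
    static uint8_t ClampDoubleToUint8Reference(double value) {
      if (!(value > 0.0)) return 0;    // NaN, zero and negative values.
      if (value >= 255.0) return 255;  // Infinity and large values.
      return static_cast<uint8_t>(std::nearbyint(value));
    }
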
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -5423,15 +5844,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(prototypes->length() == maps->length());
- // TODO(ulan): Move this check to hydrogen and split HCheckPrototypeMaps
- // into two instruction: one that checks the prototypes and another that
- // loads the holder (HConstant). Find a way to do it without breaking
- // parallel recompilation.
if (instr->hydrogen()->CanOmitPrototypeChecks()) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
@@ -5557,11 +5973,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
- }
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5585,8 +6001,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
__ StoreToSafepointRegisterSlot(result, size);
}
__ push(size);
- CallRuntimeFromDeferred(
- Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldPointerSpace, 1, instr, instr->context());
+ } else {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr, instr->context());
+ }
__ StoreToSafepointRegisterSlot(result, eax);
}
@@ -5615,26 +6036,33 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(not_equal, instr->environment());
}
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ push(Immediate(isolate()->factory()->empty_fixed_array()));
-
- // Pick the right runtime function or stub to call.
+ // Set up the parameters to the stub/runtime call and pick the right
+ // runtime function or stub to call. The boilerplate already exists and
+ // constant elements are never accessed, so pass an empty fixed array.
int length = instr->hydrogen()->length();
if (instr->hydrogen()->IsCopyOnWrite()) {
ASSERT(instr->hydrogen()->depth() == 1);
+ __ LoadHeapObject(eax, literals);
+ __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
+ __ PushHeapObject(literals);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(isolate()->factory()->empty_fixed_array()));
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ PushHeapObject(literals);
+ __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ push(Immediate(isolate()->factory()->empty_fixed_array()));
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
+ __ LoadHeapObject(eax, literals);
+ __ mov(ebx, Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
+ __ mov(ecx, Immediate(isolate()->factory()->empty_fixed_array()));
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
@@ -5645,178 +6073,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(ecx));
- ASSERT(!result.is(ecx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- if (FLAG_debug_code) {
- __ LoadHeapObject(ecx, object);
- __ cmp(source, ecx);
- __ Assert(equal, "Unexpected object literal boilerplate");
- __ mov(ecx, FieldOperand(source, HeapObject::kMapOffset));
- __ cmp(ecx, Handle<Map>(object->map()));
- __ Assert(equal, "Unexpected boilerplate map");
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, object->GetElementsKind() << Map::kElementsKindShift);
- __ Assert(equal, "Unexpected boilerplate elements kind");
- }
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(ecx, Operand(result, elements_offset));
- } else {
- __ mov(ecx, FieldOperand(source, i));
- }
- __ mov(FieldOperand(result, object_offset + i), ecx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ mov(FieldOperand(result, object_size),
- Immediate(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ mov(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ mov(ecx, FieldOperand(source, i));
- __ mov(FieldOperand(result, elements_offset + i), ecx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(result, total_offset), Immediate(value_low));
- __ mov(FieldOperand(result, total_offset + 4), Immediate(value_high));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(ecx, Operand(result, *offset));
- __ mov(FieldOperand(result, total_offset), ecx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(ecx, Handle<HeapObject>::cast(value));
- __ mov(FieldOperand(result, total_offset), ecx);
- } else {
- __ mov(FieldOperand(result, total_offset), Immediate(value));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- ASSERT(ToRegister(instr->context()).is(esi));
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the literal boilerplate ElementsKind is of a type different than
- // the expected one. The check isn't necessary if the boilerplate has already
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- __ mov(ecx, FieldOperand(ebx, HeapObject::kMapOffset));
- // Load the map's "bit field 2". We only need the first byte,
- // but the following masking takes care of that anyway.
- __ mov(ecx, FieldOperand(ecx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(ecx, Map::kElementsKindMask);
- __ cmp(ecx, boilerplate_elements_kind << Map::kElementsKindShift);
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ Allocate(size, eax, ecx, edx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ push(Immediate(Smi::FromInt(size)));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(ebx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), eax, ebx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
Handle<FixedArray> literals(instr->environment()->closure()->literals());
@@ -5923,7 +6179,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
+ FastNewClosureStub stub(shared_info->language_mode(),
+ shared_info->is_generator());
__ push(Immediate(shared_info));
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 3a38e321d..362f091d2 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -68,6 +68,7 @@ class LCodeGen BASE_EMBEDDED {
osr_pc_offset_(-1),
last_lazy_deopt_pc_(0),
frame_is_built_(false),
+ x87_stack_depth_(0),
safepoints_(info->zone()),
resolver_(this),
expected_safepoint_kind_(Safepoint::kSimple) {
@@ -102,10 +103,17 @@ class LCodeGen BASE_EMBEDDED {
return Immediate(ToInteger32(LConstantOperand::cast(op)));
}
- Handle<Object> ToHandle(LConstantOperand* op) const;
+ // Support for non-SSE2 (x87) floating-point stack handling.
+ // These functions maintain the depth of the stack (either 0 or 1).
+ void PushX87DoubleOperand(Operand src);
+ void PushX87FloatOperand(Operand src);
+ void ReadX87Operand(Operand dst);
+ bool X87StackNonEmpty() const { return x87_stack_depth_ > 0; }
+ void PopX87();
+ void CurrentInstructionReturnsX87Result();
+ void FlushX87StackIfNecessary(LInstruction* instr);
- // A utility for instructions that return floating point values on X87.
- void HandleX87FPReturnValue(LInstruction* instr);
+ Handle<Object> ToHandle(LConstantOperand* op) const;
// The operand denoting the second word (the one with a higher address) of
// a double stack slot.
@@ -129,7 +137,8 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredTaggedToINoSSE2(LTaggedToINoSSE2* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
@@ -281,15 +290,7 @@ class LCodeGen BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@@ -315,6 +316,14 @@ class LCodeGen BASE_EMBEDDED {
LEnvironment* env,
NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+ void EmitNumberUntagDNoSSE2(
+ Register input,
+ Register temp,
+ bool deoptimize_on_undefined,
+ bool deoptimize_on_minus_zero,
+ LEnvironment* env,
+ NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+
// Emits optimized code for typeof x == "y". Modifies input register.
// Returns the condition on which a final split to
// true and false label should be made, to optimize fallthrough.
@@ -404,6 +413,7 @@ class LCodeGen BASE_EMBEDDED {
int osr_pc_offset_;
int last_lazy_deopt_pc_;
bool frame_is_built_;
+ int x87_stack_depth_;
// Builder that keeps track of safepoints in the code. The table
// itself is emitted at the end of the generated code.
diff --git a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
index b062ba5d3..6c7e375ad 100644
--- a/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/deps/v8/src/ia32/lithium-gap-resolver-ia32.cc
@@ -324,29 +324,61 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- XMMRegister src = cgen_->ToDoubleRegister(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movaps(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ XMMRegister src = cgen_->ToDoubleRegister(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movaps(dst, src);
+ } else {
+ ASSERT(destination->IsDoubleStackSlot());
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(dst, src);
+ }
} else {
+ // In the non-SSE2 case the source double register is the top of the X87
+ // stack; store it to the destination, which must be a double stack slot.
+ ASSERT(source->index() == 0); // source is on top of the stack
ASSERT(destination->IsDoubleStackSlot());
Operand dst = cgen_->ToOperand(destination);
- __ movdbl(dst, src);
+ cgen_->ReadX87Operand(dst);
}
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), SSE2);
- ASSERT(destination->IsDoubleRegister() ||
- destination->IsDoubleStackSlot());
- Operand src = cgen_->ToOperand(source);
- if (destination->IsDoubleRegister()) {
- XMMRegister dst = cgen_->ToDoubleRegister(destination);
- __ movdbl(dst, src);
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatureScope scope(cgen_->masm(), SSE2);
+ ASSERT(destination->IsDoubleRegister() ||
+ destination->IsDoubleStackSlot());
+ Operand src = cgen_->ToOperand(source);
+ if (destination->IsDoubleRegister()) {
+ XMMRegister dst = cgen_->ToDoubleRegister(destination);
+ __ movdbl(dst, src);
+ } else {
+ // We rely on having xmm0 available as a fixed scratch register.
+ Operand dst = cgen_->ToOperand(destination);
+ __ movdbl(xmm0, src);
+ __ movdbl(dst, xmm0);
+ }
} else {
- // We rely on having xmm0 available as a fixed scratch register.
- Operand dst = cgen_->ToOperand(destination);
- __ movdbl(xmm0, src);
- __ movdbl(dst, xmm0);
+ // Load the stack slot onto the top of the floating-point stack and then
+ // store it in the destination. If the destination is a double register, it
+ // represents the top of the stack and nothing more needs to be done.
+ if (destination->IsDoubleStackSlot()) {
+ Register tmp = EnsureTempRegister();
+ Operand src0 = cgen_->ToOperand(source);
+ Operand src1 = cgen_->HighOperand(source);
+ Operand dst0 = cgen_->ToOperand(destination);
+ Operand dst1 = cgen_->HighOperand(destination);
+ __ mov(tmp, src0); // Then use tmp to copy source to destination.
+ __ mov(dst0, tmp);
+ __ mov(tmp, src1);
+ __ mov(dst1, tmp);
+ } else {
+ Operand src = cgen_->ToOperand(source);
+ if (cgen_->X87StackNonEmpty()) {
+ cgen_->PopX87();
+ }
+ cgen_->PushX87DoubleOperand(src);
+ }
}
} else {
UNREACHABLE();
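
Without SSE2 the memory-to-memory double move above is done as two 32-bit word copies through a general-purpose temporary. Conceptually (a sketch, not code from this patch; CopyDoubleSlotReference is a hypothetical name):

    #include <cstdint>

    // Copy a 64-bit double stack slot as two 32-bit words via an integer
    // temp, mirroring the mov(tmp, src) / mov(dst, tmp) pairs emitted above.
    static void CopyDoubleSlotReference(const uint32_t src[2], uint32_t dst[2]) {
      uint32_t tmp = src[0];  // Low word.
      dst[0] = tmp;
      tmp = src[1];           // High word.
      dst[1] = tmp;
    }
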
@@ -419,21 +451,19 @@ void LGapResolver::EmitSwap(int index) {
__ movaps(xmm0, src);
__ movaps(src, dst);
__ movaps(dst, xmm0);
-
} else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// XMM register-memory swap. We rely on having xmm0
// available as a fixed scratch register.
ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
- ? source
- : destination);
+ ? source
+ : destination);
Operand other =
cgen_->ToOperand(source->IsDoubleRegister() ? destination : source);
__ movdbl(xmm0, other);
__ movdbl(other, reg);
__ movdbl(reg, Operand(xmm0));
-
} else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), SSE2);
// Double-width memory-to-memory. Spill on demand to use a general
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index 102515a91..a7bb2d95f 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -91,6 +91,22 @@ void LInstruction::VerifyCall() {
#endif
+bool LInstruction::HasDoubleRegisterResult() {
+ return HasResult() && result()->IsDoubleRegister();
+}
+
+
+bool LInstruction::HasDoubleRegisterInput() {
+ for (int i = 0; i < InputCount(); i++) {
+ LOperand* op = InputAt(i);
+ if (op->IsDoubleRegister()) {
+ return true;
+ }
+ }
+ return false;
+}
+
+
void LInstruction::PrintTo(StringStream* stream) {
stream->Add("%s ", this->Mnemonic());
@@ -304,29 +320,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
-void LMathPowHalf::PrintDataTo(StringStream* stream) {
- stream->Add("/pow_half ");
- value()->PrintTo(stream);
-}
-
-
-void LMathRound::PrintDataTo(StringStream* stream) {
- stream->Add("/round ");
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -542,6 +535,11 @@ LOperand* LChunkBuilder::UseFixedDouble(HValue* value, XMMRegister reg) {
}
+LOperand* LChunkBuilder::UseX87TopOfStack(HValue* value) {
+ return Use(value, ToUnallocated(x87tos));
+}
+
+
LOperand* LChunkBuilder::UseRegister(HValue* value) {
return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
}
@@ -1187,62 +1185,105 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* context = UseAny(instr->context()); // Not actually used.
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return DefineSameAsFirst(result);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* context = UseFixed(instr->context(), esi);
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else {
- LOperand* context = UseAny(instr->context()); // Deferred use by MathAbs.
- LOperand* input = NULL;
- if (op == kMathPowHalf) {
- input = UseRegisterAtStart(instr->value());
- LOperand* temp = TempRegister();
- LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
- return DefineSameAsFirst(result);
- } else if (op == kMathRound) {
- input = UseRegister(instr->value());
- LOperand* temp = FixedTemp(xmm4);
- LMathRound* result = new(zone()) LMathRound(context, input, temp);
- return AssignEnvironment(DefineAsRegister(result));
- } else {
- input = UseRegisterAtStart(instr->value());
- }
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
- input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(xmm4);
+ LMathRound* result = new(zone()) LMathRound(context, input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context()); // Deferred use.
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(context, input);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathLog* result = new(zone()) LMathLog(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* context = UseAny(instr->context());
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LOperand* temp = TempRegister();
+ LMathPowHalf* result = new(zone()) LMathPowHalf(context, input, temp);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* context = UseFixed(instr->context(), esi);
@@ -1861,20 +1902,33 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
? TempRegister()
: NULL;
LNumberUntagD* res = new(zone()) LNumberUntagD(value, temp);
- return AssignEnvironment(DefineAsRegister(res));
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ return AssignEnvironment(DefineAsRegister(res));
+ } else {
+ return AssignEnvironment(DefineX87TOS(res));
+ }
} else {
ASSERT(to.IsInteger32());
- LOperand* value = UseRegister(instr->value());
if (instr->value()->type().IsSmi()) {
+ LOperand* value = UseRegister(instr->value());
return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
} else {
bool truncating = instr->CanTruncateToInt32();
- LOperand* xmm_temp =
- (truncating && CpuFeatures::IsSupported(SSE3))
- ? NULL
- : FixedTemp(xmm1);
- LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
- return AssignEnvironment(DefineSameAsFirst(res));
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ LOperand* value = UseRegister(instr->value());
+ LOperand* xmm_temp =
+ (truncating && CpuFeatures::IsSupported(SSE3))
+ ? NULL
+ : FixedTemp(xmm1);
+ LTaggedToI* res = new(zone()) LTaggedToI(value, xmm_temp);
+ return AssignEnvironment(DefineSameAsFirst(res));
+ } else {
+ LOperand* value = UseFixed(instr->value(), ecx);
+ LTaggedToINoSSE2* res =
+ new(zone()) LTaggedToINoSSE2(value, TempRegister(),
+ TempRegister(), TempRegister());
+ return AssignEnvironment(DefineFixed(res, ecx));
+ }
}
}
} else if (from.IsDouble()) {
@@ -1947,7 +2001,7 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
+ return AssignEnvironment(result);
}
@@ -1992,12 +2046,20 @@ LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
} else {
ASSERT(input_rep.IsTagged());
- LOperand* reg = UseFixed(value, eax);
- // Register allocator doesn't (yet) support allocation of double
- // temps. Reserve xmm1 explicitly.
- LOperand* temp = FixedTemp(xmm1);
- LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
- return AssignEnvironment(DefineFixed(result, eax));
+ if (CpuFeatures::IsSupported(SSE2)) {
+ LOperand* reg = UseFixed(value, eax);
+ // Register allocator doesn't (yet) support allocation of double
+ // temps. Reserve xmm1 explicitly.
+ LOperand* temp = FixedTemp(xmm1);
+ LClampTToUint8* result = new(zone()) LClampTToUint8(reg, temp);
+ return AssignEnvironment(DefineFixed(result, eax));
+ } else {
+ LOperand* value = UseRegister(instr->value());
+ LClampTToUint8NoSSE2* res =
+ new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
+ TempRegister(), TempRegister());
+ return AssignEnvironment(DefineFixed(res, ecx));
+ }
}
}
@@ -2018,10 +2080,13 @@ LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
return DefineAsRegister(new(zone()) LConstantI);
} else if (r.IsDouble()) {
double value = instr->DoubleValue();
- LOperand* temp = (BitCast<uint64_t, double>(value) != 0)
- ? TempRegister()
- : NULL;
- return DefineAsRegister(new(zone()) LConstantD(temp));
+ bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ LOperand* temp = value_is_zero ? NULL : TempRegister();
+ return DefineAsRegister(new(zone()) LConstantD(temp));
+ } else {
+ return DefineX87TOS(new(zone()) LConstantD(NULL));
+ }
} else if (r.IsTagged()) {
return DefineAsRegister(new(zone()) LConstantT);
} else {
@@ -2190,6 +2255,27 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
+LOperand* LChunkBuilder::GetStoreKeyedValueOperand(HStoreKeyed* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+
+ // Determine if we need a byte register in this case for the value.
+ bool val_is_fixed_register =
+ elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS;
+ if (val_is_fixed_register) {
+ return UseFixed(instr->value(), eax);
+ }
+
+ if (!CpuFeatures::IsSafeForSnapshot(SSE2) &&
+ IsDoubleOrFloatElementsKind(elements_kind)) {
+ return UseRegisterAtStart(instr->value());
+ }
+
+ return UseRegister(instr->value());
+}
+
+
LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (!instr->is_external()) {
ASSERT(instr->elements()->representation().IsTagged());
@@ -2198,7 +2284,12 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
if (instr->value()->representation().IsDouble()) {
LOperand* object = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
+ LOperand* val = NULL;
+ if (CpuFeatures::IsSafeForSnapshot(SSE2)) {
+ val = UseRegisterAtStart(instr->value());
+ } else if (!instr->IsConstantHoleStore()) {
+ val = UseX87TopOfStack(instr->value());
+ }
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
return new(zone()) LStoreKeyed(object, key, val);
@@ -2228,15 +2319,7 @@ LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
ASSERT(instr->elements()->representation().IsExternal());
LOperand* external_pointer = UseRegister(instr->elements());
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
-
- LOperand* val = val_is_fixed_register
- ? UseFixed(instr->value(), eax)
- : UseRegister(instr->value());
+ LOperand* val = GetStoreKeyedValueOperand(instr);
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
@@ -2400,13 +2483,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- LOperand* context = UseFixed(instr->context(), esi);
- return MarkAsCall(
- DefineFixed(new(zone()) LFastLiteral(context), eax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
LOperand* context = UseFixed(instr->context(), esi);
return MarkAsCall(
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index 1c490bb57..10272fd42 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -74,6 +74,7 @@ class LCodeGen;
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
+ V(ClampTToUint8NoSSE2) \
V(ClassOfTestAndBranch) \
V(CmpIDAndBranch) \
V(CmpObjectEqAndBranch) \
@@ -91,7 +92,6 @@ class LCodeGen;
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -128,11 +128,18 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -167,6 +174,7 @@ class LCodeGen;
V(StringLength) \
V(SubI) \
V(TaggedToI) \
+ V(TaggedToINoSSE2) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
@@ -174,7 +182,6 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf) \
V(ForInPrepareMap) \
@@ -265,6 +272,9 @@ class LInstruction: public ZoneObject {
virtual bool HasResult() const = 0;
virtual LOperand* result() = 0;
+ bool HasDoubleRegisterResult();
+ bool HasDoubleRegisterInput();
+
LOperand* FirstInput() { return InputAt(0); }
LOperand* Output() { return HasResult() ? result() : NULL; }
@@ -645,9 +655,39 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
+class LMathFloor: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathFloor(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound: public LTemplateInstruction<1, 2, 1> {
+ public:
+ LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
+ inputs_[1] = context;
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* context() { return inputs_[1]; }
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs: public LTemplateInstruction<1, 2, 0> {
public:
- LUnaryMathOperation(LOperand* context, LOperand* value) {
+ LMathAbs(LOperand* context, LOperand* value) {
inputs_[1] = context;
inputs_[0] = value;
}
@@ -655,11 +695,56 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathLog: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathSin: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathCos: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
@@ -679,32 +764,24 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
};
-class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
+class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
public:
- LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
- inputs_[1] = context;
+ explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
- temps_[0] = temp;
}
- LOperand* context() { return inputs_[1]; }
LOperand* value() { return inputs_[0]; }
- LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
-class LMathRound: public LTemplateInstruction<1, 2, 1> {
+class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
public:
- LMathRound(LOperand* context, LOperand* value, LOperand* temp) {
+ LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
inputs_[1] = context;
inputs_[0] = value;
temps_[0] = temp;
@@ -714,10 +791,7 @@ class LMathRound: public LTemplateInstruction<1, 2, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
- DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
-
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -1088,6 +1162,10 @@ class LConstantD: public LTemplateInstruction<1, 0, 1> {
temps_[0] = temp;
}
+ virtual bool ClobbersDoubleRegisters() const {
+ return false;
+ }
+
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
@@ -1296,7 +1374,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -2018,6 +2096,31 @@ class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
};
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToINoSSE2: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LTaggedToINoSSE2(LOperand* value,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = value;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
+ LOperand* scratch2() { return temps_[1]; }
+ LOperand* scratch3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(TaggedToINoSSE2, "tagged-to-i-nosse2")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+ bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
class LSmiTag: public LTemplateInstruction<1, 1, 0> {
public:
explicit LSmiTag(LOperand* value) {
@@ -2040,6 +2143,10 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
+ virtual bool ClobbersDoubleRegisters() const {
+ return false;
+ }
+
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change);
};
@@ -2313,7 +2420,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
@@ -2380,6 +2487,30 @@ class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
};
+// Clamping conversion from a tagged value to a uint8 (non-SSE2 code path).
+class LClampTToUint8NoSSE2: public LTemplateInstruction<1, 1, 3> {
+ public:
+ LClampTToUint8NoSSE2(LOperand* unclamped,
+ LOperand* temp1,
+ LOperand* temp2,
+ LOperand* temp3) {
+ inputs_[0] = unclamped;
+ temps_[0] = temp1;
+ temps_[1] = temp2;
+ temps_[2] = temp3;
+ }
+
+ LOperand* unclamped() { return inputs_[0]; }
+ LOperand* scratch() { return temps_[0]; }
+ LOperand* scratch2() { return temps_[1]; }
+ LOperand* scratch3() { return temps_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8NoSSE2,
+ "clamp-t-to-uint8-nosse2")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+};
+
+
class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
@@ -2424,19 +2555,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 1> {
};
-class LFastLiteral: public LTemplateInstruction<1, 1, 0> {
- public:
- explicit LFastLiteral(LOperand* context) {
- inputs_[0] = context;
- }
-
- LOperand* context() { return inputs_[0]; }
-
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
class LArrayLiteral: public LTemplateInstruction<1, 1, 0> {
public:
explicit LArrayLiteral(LOperand* context) {
@@ -2712,6 +2830,17 @@ class LChunkBuilder BASE_EMBEDDED {
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
@@ -2742,6 +2871,7 @@ class LChunkBuilder BASE_EMBEDDED {
MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
XMMRegister fixed_register);
+ MUST_USE_RESULT LOperand* UseX87TopOfStack(HValue* value);
// A value that is guaranteed to be allocated to a register.
// Operand created by UseRegister is guaranteed to be live until the end of
@@ -2827,6 +2957,8 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoArithmeticT(Token::Value op,
HArithmeticBinaryOperation* instr);
+ LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
+
LPlatformChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 3d3dabca4..3228e8370 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -1024,69 +1024,65 @@ void MacroAssembler::ThrowUncatchable(Register value) {
void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss) {
Label same_contexts;
- ASSERT(!holder_reg.is(scratch));
+ ASSERT(!holder_reg.is(scratch1));
+ ASSERT(!holder_reg.is(scratch2));
+ ASSERT(!scratch1.is(scratch2));
// Load current lexical context from the stack frame.
- mov(scratch, Operand(ebp, StandardFrameConstants::kContextOffset));
+ mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
// When generating debug code, make sure the lexical context is set.
if (emit_debug_code()) {
- cmp(scratch, Immediate(0));
+ cmp(scratch1, Immediate(0));
Check(not_equal, "we should not have an empty lexical context");
}
// Load the native context of the current context.
int offset =
Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, offset));
- mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+ mov(scratch1, FieldOperand(scratch1, offset));
+ mov(scratch1, FieldOperand(scratch1, GlobalObject::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
- push(scratch);
// Read the first word and compare to native_context_map.
- mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
- cmp(scratch, isolate()->factory()->native_context_map());
+ cmp(FieldOperand(scratch1, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(scratch);
}
// Check if both contexts are the same.
- cmp(scratch, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+ cmp(scratch1, FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
j(equal, &same_contexts);
// Compare security tokens, save holder_reg on the stack so we can use it
// as a temporary register.
//
- // TODO(119): avoid push(holder_reg)/pop(holder_reg)
- push(holder_reg);
// Check that the security token in the calling global object is
// compatible with the security token in the receiving global
// object.
- mov(holder_reg,
+ mov(scratch2,
FieldOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
// Check the context is a native context.
if (emit_debug_code()) {
- cmp(holder_reg, isolate()->factory()->null_value());
+ cmp(scratch2, isolate()->factory()->null_value());
Check(not_equal, "JSGlobalProxy::context() should not be null.");
- push(holder_reg);
// Read the first word and compare to native_context_map(),
- mov(holder_reg, FieldOperand(holder_reg, HeapObject::kMapOffset));
- cmp(holder_reg, isolate()->factory()->native_context_map());
+ cmp(FieldOperand(scratch2, HeapObject::kMapOffset),
+ isolate()->factory()->native_context_map());
Check(equal, "JSGlobalObject::native_context should be a native context.");
- pop(holder_reg);
}
int token_offset = Context::kHeaderSize +
Context::SECURITY_TOKEN_INDEX * kPointerSize;
- mov(scratch, FieldOperand(scratch, token_offset));
- cmp(scratch, FieldOperand(holder_reg, token_offset));
- pop(holder_reg);
+ mov(scratch1, FieldOperand(scratch1, token_offset));
+ cmp(scratch1, FieldOperand(scratch2, token_offset));
j(not_equal, miss);
bind(&same_contexts);
@@ -2522,6 +2518,28 @@ void MacroAssembler::Ret(int bytes_dropped, Register scratch) {
}
+void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+ // Make sure the floating point stack is either empty or has depth items.
+ ASSERT(depth <= 7);
+
+ // The top-of-stack (tos) is 7 if there is one item pushed.
+ int tos = (8 - depth) % 8;
+ const int kTopMask = 0x3800;
+ push(eax);
+ fwait();
+ fnstsw_ax();
+ and_(eax, kTopMask);
+ shr(eax, 11);
+ cmp(eax, Immediate(tos));
+ Label all_ok;
+ j(equal, &all_ok);
+ Check(equal, "Unexpected FPU stack depth after instruction");
+ bind(&all_ok);
+ fnclex();
+ pop(eax);
+}
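
For reference, the check above reads the TOP field of the x87 FPU status word; a minimal model of that arithmetic (an illustration, not code from this patch; X87DepthMatches is a hypothetical name):

    #include <cstdint>

    // TOP lives in bits 11-13 of the FPU status word (mask 0x3800). An empty
    // stack has TOP == 0 and each push decrements it mod 8, so one pushed
    // item gives TOP == 7, matching tos = (8 - depth) % 8 above.
    static bool X87DepthMatches(uint16_t status_word, uint32_t depth) {
      uint32_t top = (status_word & 0x3800u) >> 11;
      return top == (8 - depth) % 8;
    }
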
+
+
void MacroAssembler::Drop(int stack_elements) {
if (stack_elements > 0) {
add(esp, Immediate(stack_elements * kPointerSize));
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index b3dae7320..159ae6e6b 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -548,7 +548,8 @@ class MacroAssembler: public Assembler {
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
- Register scratch,
+ Register scratch1,
+ Register scratch2,
Label* miss);
void GetNumberHash(Register r0, Register scratch);
@@ -806,6 +807,8 @@ class MacroAssembler: public Assembler {
return code_object_;
}
+ // Insert code to verify that the x87 stack has the specified depth (0-7).
+ void VerifyX87StackDepth(uint32_t depth);
// ---------------------------------------------------------------------------
// StatsCounter support
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index cb4b4a731..cb3c68ea8 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -726,33 +726,54 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+ if (Serializer::enabled()) {
+ __ mov(scratch, Immediate(cell));
+ __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ Immediate(the_hole));
+ } else {
+ __ cmp(Operand::Cell(cell), Immediate(the_hole));
+ }
+ __ j(not_equal, miss);
+}
+
+
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- Label* miss_restore_name) {
+void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label,
+ Label* miss_restore_name) {
// Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
+ miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
}
// Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
@@ -771,12 +792,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
- if (lookup->holder() == *object &&
- !holder->HasFastProperties() &&
- !holder->IsJSGlobalProxy() &&
- !holder->IsJSGlobalObject()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ if (lookup->holder() == *object) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm,
+ Handle<GlobalObject>(GlobalObject::cast(holder)),
+ name,
+ scratch1,
+ miss_restore_name);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ }
}
}
@@ -785,7 +812,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch1); // Return address.
@@ -801,33 +828,29 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- int index;
- if (!transition.is_null()) {
- // Update the map of the object.
- __ mov(scratch1, Immediate(transition));
- __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+ // Update the map of the object.
+ __ mov(scratch1, Immediate(transition));
+ __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
- } else {
- index = lookup->GetFieldIndex().field_index();
- }
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
   // Adjust for the number of properties stored in the object. Even in the
   // face of a transition we can use the old map here because the size of the
   // object and the number of in-object properties are not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -864,26 +887,71 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
- if (Serializer::enabled()) {
- __ mov(scratch, Immediate(cell));
- __ cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- Immediate(the_hole));
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if store is successful.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, scratch2, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ mov(FieldOperand(receiver_reg, offset), value_reg);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(receiver_reg,
+ offset,
+ name_reg,
+ scratch1,
+ kDontSaveFPRegs);
} else {
- __ cmp(Operand::Cell(cell), Immediate(the_hole));
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ mov(FieldOperand(scratch1, offset), eax);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kDontSaveFPRegs);
}
- __ j(not_equal, miss);
+
+ // Return the value (register eax).
+ ASSERT(value_reg.is(eax));
+ __ ret(0);
}
@@ -972,10 +1040,6 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
} else {
bool in_new_space = heap()->InNewSpace(*prototype);
Handle<Map> current_map(current->map());
- if (in_new_space) {
- // Save the map in scratch1 for later.
- __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
- }
if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
__ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK,
ALLOW_ELEMENT_TRANSITION_MAPS);
@@ -985,8 +1049,14 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// the map check so that we know that the object is actually a global
// object.
if (current->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch2, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
+
+ if (in_new_space) {
+ // Save the map in scratch1 for later.
+ __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+ }
+
reg = holder_reg; // From now on the object will be in holder_reg.
if (in_new_space) {
@@ -1020,7 +1090,7 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
// Perform security check for access to the global object.
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
if (holder->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(reg, scratch1, miss);
+ __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
}
// If we've skipped any global objects, it's not enough to verify that
@@ -1111,7 +1181,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
Handle<GlobalObject> global) {
Label miss;
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1119,13 +1189,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- if (!last->HasFastProperties()) {
- __ mov(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ mov(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
HandlerFrontendFooter(success, &miss);
}
@@ -2657,7 +2720,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
- __ CheckAccessGlobalProxy(edx, ebx, &miss);
+ __ CheckAccessGlobalProxy(receiver(), scratch1(), scratch2(), &miss);
}
// Stub never generated for non-global objects that require access
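The split above separates the transitioning store (GenerateStoreTransition) from the plain field store (GenerateStoreField); both end by mapping a descriptor field index to either an in-object slot or a slot in the out-of-line properties array. A standalone sketch of that offset computation follows (not V8 code; the constants are illustrative assumptions only):

// Standalone sketch: how a field index is mapped to a byte offset, mirroring
// the logic in GenerateStoreField above. kPointerSize, kInstanceSize,
// kInobjectProperties and kFixedArrayHeaderSize are assumed values.
#include <cstdio>

int main() {
  const int kPointerSize = 4;            // ia32
  const int kInstanceSize = 16;          // assumed object size in bytes
  const int kInobjectProperties = 2;     // assumed in-object slots
  const int kFixedArrayHeaderSize = 8;   // assumed backing-store header

  for (int field_index = 0; field_index < 4; field_index++) {
    // Indices below the in-object count live inside the object itself;
    // the rest go to the separate properties array.
    int index = field_index - kInobjectProperties;
    if (index < 0) {
      int offset = kInstanceSize + index * kPointerSize;
      printf("field %d: in-object at offset %d\n", field_index, offset);
    } else {
      int offset = kFixedArrayHeaderSize + index * kPointerSize;
      printf("field %d: properties array at offset %d\n", field_index, offset);
    }
  }
  return 0;
}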
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index 515c6f71a..3c33e4ff3 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -1586,7 +1586,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeStoreField(
- name, receiver, lookup, Handle<Map>::null(), strict_mode);
+ name, receiver, lookup, strict_mode);
case NORMAL:
if (receiver->IsGlobalObject()) {
// The stub generated for the global object picks the value directly
@@ -1644,7 +1644,7 @@ Handle<Code> StoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
if (details.type() != FIELD || details.attributes() != NONE) break;
- return isolate()->stub_cache()->ComputeStoreField(
+ return isolate()->stub_cache()->ComputeStoreTransition(
name, receiver, lookup, transition, strict_mode);
}
case NONEXISTENT:
@@ -1987,7 +1987,7 @@ Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
switch (lookup->type()) {
case FIELD:
return isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup, Handle<Map>::null(), strict_mode);
+ name, receiver, lookup, strict_mode);
case TRANSITION: {
// Explicitly pass in the receiver map since LookupForWrite may have
// stored something else than the receiver in the holder.
@@ -1999,7 +1999,7 @@ Handle<Code> KeyedStoreIC::ComputeStoreMonomorphic(LookupResult* lookup,
PropertyDetails details = target_descriptors->GetDetails(descriptor);
if (details.type() == FIELD && details.attributes() == NONE) {
- return isolate()->stub_cache()->ComputeKeyedStoreField(
+ return isolate()->stub_cache()->ComputeKeyedStoreTransition(
name, receiver, lookup, transition, strict_mode);
}
// fall through.
@@ -2386,8 +2386,7 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ic.patch(*code);
}
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::SUB:
@@ -2524,8 +2523,7 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
}
}
- Handle<JSBuiltinsObject> builtins = Handle<JSBuiltinsObject>(
- isolate->thread_local_top()->context_->builtins(), isolate);
+ Handle<JSBuiltinsObject> builtins(isolate->js_builtins_object());
Object* builtin = NULL; // Initialization calms down the compiler.
switch (op) {
case Token::ADD:
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 04155195a..ca2c0257f 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -371,6 +371,12 @@ Isolate::PerIsolateThreadData*
Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThisThread() {
ThreadId thread_id = ThreadId::Current();
+ return FindPerThreadDataForThread(thread_id);
+}
+
+
+Isolate::PerIsolateThreadData* Isolate::FindPerThreadDataForThread(
+ ThreadId thread_id) {
PerIsolateThreadData* per_thread = NULL;
{
ScopedLock lock(process_wide_mutex_);
@@ -1140,7 +1146,7 @@ void Isolate::PrintCurrentStackTrace(FILE* out) {
Execution::GetStackTraceLine(recv, fun, pos_obj, is_top_level);
if (line->length() > 0) {
line->PrintOn(out);
- fprintf(out, "\n");
+ PrintF(out, "\n");
}
}
}
@@ -1213,6 +1219,7 @@ bool Isolate::IsErrorObject(Handle<Object> obj) {
return false;
}
+static int fatal_exception_depth = 0;
void Isolate::DoThrow(Object* exception, MessageLocation* location) {
ASSERT(!has_pending_exception());
@@ -1296,6 +1303,21 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
thread_local_top()->pending_message_start_pos_ = location->start_pos();
thread_local_top()->pending_message_end_pos_ = location->end_pos();
}
+
+ // If the abort-on-uncaught-exception flag is specified, abort on any
+ // exception not caught by JavaScript, even when an external handler is
+ // present. This flag is intended for use by JavaScript developers, so
+ // print a user-friendly stack trace (not an internal one).
+ if (fatal_exception_depth == 0 &&
+ FLAG_abort_on_uncaught_exception &&
+ (report_exception || can_be_caught_externally)) {
+ fatal_exception_depth++;
+ PrintF(stderr,
+ "%s\n\nFROM\n",
+ *MessageHandler::GetLocalizedMessage(this, message_obj));
+ PrintCurrentStackTrace(stderr);
+ OS::Abort();
+ }
} else if (location != NULL && !location->script().is_null()) {
// We are bootstrapping and caught an error where the location is set
// and we have a script for the location.
@@ -1508,14 +1530,12 @@ bool Isolate::is_out_of_memory() {
Handle<Context> Isolate::native_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->native_context());
+ return Handle<Context>(context()->global_object()->native_context());
}
Handle<Context> Isolate::global_context() {
- GlobalObject* global = thread_local_top()->context_->global_object();
- return Handle<Context>(global->global_context());
+ return Handle<Context>(context()->global_object()->global_context());
}
@@ -1542,11 +1562,8 @@ Handle<Context> Isolate::GetCallingNativeContext() {
char* Isolate::ArchiveThread(char* to) {
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateExitedJS(this);
- }
- memcpy(to, reinterpret_cast<char*>(thread_local_top()),
- sizeof(ThreadLocalTop));
+ OS::MemCopy(to, reinterpret_cast<char*>(thread_local_top()),
+ sizeof(ThreadLocalTop));
InitializeThreadLocal();
clear_pending_exception();
clear_pending_message();
@@ -1556,8 +1573,8 @@ char* Isolate::ArchiveThread(char* to) {
char* Isolate::RestoreThread(char* from) {
- memcpy(reinterpret_cast<char*>(thread_local_top()), from,
- sizeof(ThreadLocalTop));
+ OS::MemCopy(reinterpret_cast<char*>(thread_local_top()), from,
+ sizeof(ThreadLocalTop));
// This might be just paranoia, but it seems to be needed in case a
// thread_local_top_ is restored on a separate OS thread.
#ifdef USE_SIMULATOR
@@ -1567,9 +1584,6 @@ char* Isolate::RestoreThread(char* from) {
thread_local_top()->simulator_ = Simulator::current(this);
#endif
#endif
- if (RuntimeProfiler::IsEnabled() && current_vm_state() == JS) {
- RuntimeProfiler::IsolateEnteredJS(this);
- }
ASSERT(context() == NULL || context()->IsContext());
return from + sizeof(ThreadLocalTop);
}
@@ -1810,7 +1824,8 @@ void Isolate::Deinit() {
if (FLAG_hydrogen_stats) GetHStatistics()->Print();
// We must stop the logger before we tear down other components.
- logger_->EnsureTickerStopped();
+ Sampler* sampler = logger_->sampler();
+ if (sampler && sampler->IsActive()) sampler->Stop();
delete deoptimizer_data_;
deoptimizer_data_ = NULL;
@@ -1826,11 +1841,6 @@ void Isolate::Deinit() {
preallocated_message_space_ = NULL;
PreallocatedMemoryThreadStop();
- delete heap_profiler_;
- heap_profiler_ = NULL;
- delete cpu_profiler_;
- cpu_profiler_ = NULL;
-
if (runtime_profiler_ != NULL) {
runtime_profiler_->TearDown();
delete runtime_profiler_;
@@ -1839,6 +1849,11 @@ void Isolate::Deinit() {
heap_.TearDown();
logger_->TearDown();
+ delete heap_profiler_;
+ heap_profiler_ = NULL;
+ delete cpu_profiler_;
+ cpu_profiler_ = NULL;
+
// The default isolate is re-initializable due to legacy API.
state_ = UNINITIALIZED;
}
@@ -2055,13 +2070,12 @@ bool Isolate::Init(Deserializer* des) {
date_cache_ = new DateCache();
code_stub_interface_descriptors_ =
new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
+ cpu_profiler_ = new CpuProfiler(this);
+ heap_profiler_ = new HeapProfiler(heap());
// Enable logging before setting up the heap
logger_->SetUp();
- cpu_profiler_ = new CpuProfiler(this);
- heap_profiler_ = new HeapProfiler(heap());
-
// Initialize other runtime facilities
#if defined(USE_SIMULATOR)
#if defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
@@ -2177,9 +2191,16 @@ bool Isolate::Init(Deserializer* des) {
// Ensure that all stubs which need to be generated ahead of time, but
// cannot be serialized into the snapshot have been generated.
HandleScope scope(this);
- StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
CodeStub::GenerateFPStubs(this);
+ StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
StubFailureTrampolineStub::GenerateAheadOfTime(this);
+ // TODO(mstarzinger): The following is an ugly hack to make sure the
+ // interface descriptor is initialized even when stubs have been
+ // deserialized out of the snapshot without the graph builder.
+ FastCloneShallowArrayStub stub(FastCloneShallowArrayStub::CLONE_ELEMENTS,
+ DONT_TRACK_ALLOCATION_SITE, 0);
+ stub.InitializeInterfaceDescriptor(
+ this, code_stub_interface_descriptor(CodeStub::FastCloneShallowArray));
}
if (FLAG_parallel_recompilation) optimizing_compiler_thread_.Start();
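The new --abort_on_uncaught_exception path above guards itself with a fatal_exception_depth counter so that printing the stack trace cannot re-enter the abort path. A minimal standalone sketch of that guard (not V8 code; names and output are illustrative):

// Minimal sketch of the re-entrancy guard: the depth counter ensures the
// fatal path runs at most once even if reporting the error itself raises
// another uncaught exception.
#include <cstdio>
#include <cstdlib>

static int fatal_exception_depth = 0;

void ReportAndMaybeAbort(const char* message, bool abort_on_uncaught) {
  if (fatal_exception_depth == 0 && abort_on_uncaught) {
    fatal_exception_depth++;  // block recursive entry
    fprintf(stderr, "%s\n\nFROM\n", message);
    // A real implementation would print a stack trace here.
    abort();
  }
  // Otherwise fall through to normal exception propagation.
}

int main() {
  ReportAndMaybeAbort("ReferenceError: x is not defined", false);
  printf("no abort requested, execution continues\n");
  return 0;
}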
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index f7a81d30d..927ad0e0f 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -497,6 +497,10 @@ class Isolate {
// If one does not yet exist, return null.
PerIsolateThreadData* FindPerThreadDataForThisThread();
+ // Find the PerThread for given (isolate, thread) combination
+ // If one does not yet exist, return null.
+ PerIsolateThreadData* FindPerThreadDataForThread(ThreadId thread_id);
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Get the debugger from the default isolate. Preinitializes the
// default isolate if needed.
@@ -645,7 +649,7 @@ class Isolate {
}
inline Address* handler_address() { return &thread_local_top_.handler_; }
- // Bottom JS entry (see StackTracer::Trace in log.cc).
+ // Bottom JS entry (see StackTracer::Trace in sampler.cc).
static Address js_entry_sp(ThreadLocalTop* thread) {
return thread->js_entry_sp_;
}
@@ -1027,23 +1031,6 @@ class Isolate {
}
void SetCurrentVMState(StateTag state) {
- if (RuntimeProfiler::IsEnabled()) {
- // Make sure thread local top is initialized.
- ASSERT(thread_local_top_.isolate_ == this);
- StateTag current_state = thread_local_top_.current_vm_state_;
- if (current_state != JS && state == JS) {
- // Non-JS -> JS transition.
- RuntimeProfiler::IsolateEnteredJS(this);
- } else if (current_state == JS && state != JS) {
- // JS -> non-JS transition.
- RuntimeProfiler::IsolateExitedJS(this);
- } else {
- // Other types of state transitions are not interesting to the
- // runtime profiler, because they don't affect whether we're
- // in JS or not.
- ASSERT((current_state == JS) == (state == JS));
- }
- }
thread_local_top_.current_vm_state_ = state;
}
@@ -1480,7 +1467,6 @@ class PostponeInterruptsScope BASE_EMBEDDED {
#define HEAP (v8::internal::Isolate::Current()->heap())
#define FACTORY (v8::internal::Isolate::Current()->factory())
#define ISOLATE (v8::internal::Isolate::Current())
-#define LOGGER (v8::internal::Isolate::Current()->logger())
// Tells whether the native context is marked with out of memory.
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index ac34c59b2..74850cae2 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -102,9 +102,37 @@ class JsonParser BASE_EMBEDDED {
Handle<String> ParseJsonString() {
return ScanJsonString<false>();
}
+
+ bool ParseJsonString(Handle<String> expected) {
+ int length = expected->length();
+ if (source_->length() - position_ - 1 > length) {
+ AssertNoAllocation no_gc;
+ String::FlatContent content = expected->GetFlatContent();
+ if (content.IsAscii()) {
+ ASSERT_EQ('"', c0_);
+ const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
+ const uint8_t* expected_chars = content.ToOneByteVector().start();
+ for (int i = 0; i < length; i++) {
+ uint8_t c0 = input_chars[i];
+ if (c0 != expected_chars[i] ||
+ c0 == '"' || c0 < 0x20 || c0 == '\\') {
+ return false;
+ }
+ }
+ if (input_chars[length] == '"') {
+ position_ = position_ + length + 1;
+ AdvanceSkipWhitespace();
+ return true;
+ }
+ }
+ }
+ return false;
+ }
+
Handle<String> ParseJsonInternalizedString() {
return ScanJsonString<true>();
}
+
template <bool is_internalized>
Handle<String> ScanJsonString();
// Creates a new string and copies prefix[start..end] into the beginning
@@ -294,8 +322,13 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
HandleScope scope(isolate());
Handle<JSObject> json_object =
factory()->NewJSObject(object_constructor(), pretenure_);
+ Handle<Map> map(json_object->map());
+ ZoneScope zone_scope(zone(), DELETE_ON_EXIT);
+ ZoneList<Handle<Object> > properties(8, zone());
ASSERT_EQ(c0_, '{');
+ bool transitioning = true;
+
AdvanceSkipWhitespace();
if (c0_ != '}') {
do {
@@ -339,24 +372,75 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
c0_ = '"';
#endif
- Handle<String> key = ParseJsonInternalizedString();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+ Handle<String> key;
+ Handle<Object> value;
+
+ // Try to follow existing transitions as long as possible. Once we stop
+ // transitioning, no transition can be found anymore.
+ if (transitioning) {
+ // First check whether there is a single expected transition. If so, try
+ // to parse it first.
+ bool follow_expected = false;
+ if (seq_ascii) {
+ key = JSObject::ExpectedTransitionKey(map);
+ follow_expected = !key.is_null() && ParseJsonString(key);
+ }
+ // If the expected transition hits, follow it.
+ if (follow_expected) {
+ map = JSObject::ExpectedTransitionTarget(map);
+ } else {
+ // If the expected transition failed, parse an internalized string and
+ // try to find a matching transition.
+ key = ParseJsonInternalizedString();
+ if (key.is_null()) return ReportUnexpectedCharacter();
+
+ Handle<Map> target = JSObject::FindTransitionToField(map, key);
+ // If a transition was found, follow it and continue.
+ if (!target.is_null()) {
+ map = target;
+ } else {
+ // If no transition was found, commit the intermediate state to the
+ // object and stop transitioning.
+ JSObject::TransitionToMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ json_object->FastPropertyAtPut(i, *properties[i]);
+ }
+ transitioning = false;
+ }
+ }
+ if (c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
- if (JSObject::TryTransitionToField(json_object, key)) {
- int index = json_object->LastAddedFieldIndex();
- json_object->FastPropertyAtPut(index, *value);
+ properties.Add(value, zone());
+ if (transitioning) continue;
} else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ key = ParseJsonInternalizedString();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+
+ AdvanceSkipWhitespace();
+ value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
}
+
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
} while (MatchSkipWhiteSpace(','));
if (c0_ != '}') {
return ReportUnexpectedCharacter();
}
+
+ // If we transitioned until the very end, transition the map now.
+ if (transitioning) {
+ JSObject::TransitionToMap(json_object, map);
+ int length = properties.length();
+ for (int i = 0; i < length; i++) {
+ json_object->FastPropertyAtPut(i, *properties[i]);
+ }
+ }
}
AdvanceSkipWhitespace();
return scope.CloseAndEscape(json_object);
@@ -644,22 +728,32 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
uint32_t capacity = string_table->Capacity();
uint32_t entry = StringTable::FirstProbe(hash, capacity);
uint32_t count = 1;
+ Handle<String> result;
while (true) {
Object* element = string_table->KeyAt(entry);
if (element == isolate()->heap()->undefined_value()) {
// Lookup failure.
+ result = factory()->InternalizeOneByteString(
+ seq_source_, position_, length);
break;
}
if (element != isolate()->heap()->the_hole_value() &&
String::cast(element)->IsOneByteEqualTo(string_vector)) {
- // Lookup success, update the current position.
- position_ = position;
- // Advance past the last '"'.
- AdvanceSkipWhitespace();
- return Handle<String>(String::cast(element), isolate());
+ result = Handle<String>(String::cast(element), isolate());
+#ifdef DEBUG
+ uint32_t hash_field =
+ (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
+ ASSERT_EQ(static_cast<int>(result->Hash()),
+ static_cast<int>(hash_field >> String::kHashShift));
+#endif
+ break;
}
entry = StringTable::NextProbe(entry, count++, capacity);
}
+ position_ = position;
+ // Advance past the last '"'.
+ AdvanceSkipWhitespace();
+ return result;
}
int beg_pos = position_;
@@ -682,14 +776,10 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
}
} while (c0_ != '"');
int length = position_ - beg_pos;
- Handle<String> result;
- if (seq_ascii && is_internalized) {
- result = factory()->InternalizeOneByteString(seq_source_, beg_pos, length);
- } else {
- result = factory()->NewRawOneByteString(length, pretenure_);
- uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
- String::WriteToFlat(*source_, dest, beg_pos, position_);
- }
+ Handle<String> result = factory()->NewRawOneByteString(length, pretenure_);
+ uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
+ String::WriteToFlat(*source_, dest, beg_pos, position_);
+
ASSERT_EQ('"', c0_);
// Advance past the last '"'.
AdvanceSkipWhitespace();
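The ParseJsonObject/ParseJsonString(expected) changes above add a fast path: when the current map predicts the next key, the parser compares the raw input bytes against that expected key and only falls back to the generic internalized-string scan on a mismatch. A standalone sketch of the byte-compare idea (not V8 code; it omits the escape and control-character checks the real code performs):

// Returns true and advances *pos past "key" if the input at *pos matches the
// key expected from the object's map transition.
#include <cstdio>
#include <string>

bool MatchExpectedKey(const std::string& input, size_t* pos,
                      const std::string& expected) {
  size_t p = *pos;
  if (p >= input.size() || input[p] != '"') return false;
  p++;  // skip opening quote
  if (input.compare(p, expected.size(), expected) != 0) return false;
  p += expected.size();
  if (p >= input.size() || input[p] != '"') return false;
  *pos = p + 1;  // skip closing quote
  return true;
}

int main() {
  std::string json = "\"name\":\"v8\"";
  size_t pos = 0;
  if (MatchExpectedKey(json, &pos, "name")) {
    printf("fast path hit, parsing resumes at offset %zu\n", pos);
  } else {
    printf("fast path missed, fall back to generic key scan\n");
  }
  return 0;
}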
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
index bcdd64ce7..3bab324e1 100644
--- a/deps/v8/src/json-stringifier.h
+++ b/deps/v8/src/json-stringifier.h
@@ -296,18 +296,23 @@ MaybeObject* BasicJsonStringifier::StringifyString(Isolate* isolate,
}
FlattenString(object);
- String::FlatContent flat = object->GetFlatContent();
- if (flat.IsAscii()) {
+ ASSERT(object->IsFlat());
+ if (object->IsOneByteRepresentationUnderneath()) {
+ Handle<String> result =
+ isolate->factory()->NewRawOneByteString(worst_case_length);
+ AssertNoAllocation no_alloc;
return StringifyString_<SeqOneByteString>(
isolate,
- flat.ToOneByteVector(),
- isolate->factory()->NewRawOneByteString(worst_case_length));
+ object->GetFlatContent().ToOneByteVector(),
+ result);
} else {
- ASSERT(flat.IsTwoByte());
+ Handle<String> result =
+ isolate->factory()->NewRawTwoByteString(worst_case_length);
+ AssertNoAllocation no_alloc;
return StringifyString_<SeqTwoByteString>(
isolate,
- flat.ToUC16Vector(),
- isolate->factory()->NewRawTwoByteString(worst_case_length));
+ object->GetFlatContent().ToUC16Vector(),
+ result);
}
}
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index e94d3c8e3..b0e14e196 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -25,8 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Array = global.Array;
+// var $String = global.String;
+
var $JSON = global.JSON;
+// -------------------------------------------------------------------
+
function Revive(holder, name, reviver) {
var val = holder[name];
if (IS_OBJECT(val)) {
@@ -207,14 +214,23 @@ function JSONStringify(value, replacer, space) {
}
+// -------------------------------------------------------------------
+
function SetUpJSON() {
%CheckIsBootstrapping();
+
+ // Set up non-enumerable properties of the JSON object.
InstallFunctions($JSON, DONT_ENUM, $Array(
"parse", JSONParse,
"stringify", JSONStringify
));
}
+SetUpJSON();
+
+
+// -------------------------------------------------------------------
+// JSON Builtins
function JSONSerializeAdapter(key, object) {
var holder = {};
@@ -222,5 +238,3 @@ function JSONSerializeAdapter(key, object) {
// No need to pass the actual holder since there is no replacer function.
return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
}
-
-SetUpJSON();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index b490521bc..fd87a8053 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -624,7 +624,8 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
index);
if (result == RE_SUCCESS) {
// Copy capture results to the start of the registers array.
- memcpy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
+ OS::MemCopy(
+ output, raw_output, number_of_capture_registers * sizeof(int32_t));
}
if (result == RE_EXCEPTION) {
ASSERT(!isolate->has_pending_exception());
diff --git a/deps/v8/src/list-inl.h b/deps/v8/src/list-inl.h
index 7a84313cd..408859e45 100644
--- a/deps/v8/src/list-inl.h
+++ b/deps/v8/src/list-inl.h
@@ -29,6 +29,7 @@
#define V8_LIST_INL_H_
#include "list.h"
+#include "platform.h"
namespace v8 {
namespace internal {
@@ -87,7 +88,7 @@ template<typename T, class P>
void List<T, P>::Resize(int new_capacity, P alloc) {
ASSERT_LE(length_, new_capacity);
T* new_data = NewData(new_capacity, alloc);
- memcpy(new_data, data_, length_ * sizeof(T));
+ OS::MemCopy(new_data, data_, length_ * sizeof(T));
List<T, P>::DeleteData(data_);
data_ = new_data;
capacity_ = new_capacity;
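Many hunks in this patch replace raw memcpy/memmove with OS::MemCopy/OS::MemMove. Presumably this funnels copies through one platform abstraction; the following is a minimal sketch of such a wrapper (an assumption about the rationale, not the actual V8 implementation):

// A thin forwarding wrapper: one central place that can later add checks or
// select a platform-specific fast path without touching call sites.
#include <cstddef>
#include <cstdio>
#include <cstring>

namespace OS {
inline void MemCopy(void* dest, const void* src, std::size_t size) {
  memcpy(dest, src, size);  // could be swapped per-platform
}
}  // namespace OS

int main() {
  char src[] = "hello";
  char dst[sizeof(src)];
  OS::MemCopy(dst, src, sizeof(src));
  printf("%s\n", dst);
  return 0;
}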
diff --git a/deps/v8/src/lithium-allocator.cc b/deps/v8/src/lithium-allocator.cc
index 7049a58fd..7bddef7f9 100644
--- a/deps/v8/src/lithium-allocator.cc
+++ b/deps/v8/src/lithium-allocator.cc
@@ -1546,6 +1546,9 @@ void LAllocator::AllocateRegisters() {
LiveRange* current = unhandled_live_ranges_.RemoveLast();
ASSERT(UnhandledIsSorted());
LifetimePosition position = current->Start();
+#ifdef DEBUG
+ allocation_finger_ = position;
+#endif
TraceAlloc("Processing interval %d start=%d\n",
current->id(),
position.Value());
@@ -1670,6 +1673,7 @@ void LAllocator::AddToInactive(LiveRange* range) {
void LAllocator::AddToUnhandledSorted(LiveRange* range) {
if (range == NULL || range->IsEmpty()) return;
ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+ ASSERT(allocation_finger_.Value() <= range->Start().Value());
for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
LiveRange* cur_range = unhandled_live_ranges_.at(i);
if (range->ShouldBeAllocatedBefore(cur_range)) {
@@ -1788,7 +1792,7 @@ STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
bool LAllocator::TryAllocateFreeReg(LiveRange* current) {
LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; i++) {
+ for (int i = 0; i < num_registers_; i++) {
free_until_pos[i] = LifetimePosition::MaxPosition();
}
@@ -1880,7 +1884,7 @@ void LAllocator::AllocateBlockedReg(LiveRange* current) {
LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
- for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+ for (int i = 0; i < num_registers_; i++) {
use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
}
@@ -2000,7 +2004,15 @@ void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
if (next_pos == NULL) {
SpillAfter(range, spill_pos);
} else {
- SpillBetween(range, spill_pos, next_pos->pos());
+      // When spilling between spill_pos and next_pos, ensure that the range
+      // remains spilled at least until the start of the current live range.
+      // This guarantees that we will not introduce new unhandled ranges that
+      // start before the current range, as that would violate the allocation
+      // invariant and lead to an inconsistent state of active and inactive
+      // live ranges: ranges are allocated in order of their start positions,
+      // and ranges are retired from active/inactive when the start of the
+      // current live range is larger than their end.
+ SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
}
if (!AllocationOk()) return;
ActiveToHandled(range);
@@ -2114,6 +2126,14 @@ void LAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
void LAllocator::SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end) {
+ SpillBetweenUntil(range, start, start, end);
+}
+
+
+void LAllocator::SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end) {
CHECK(start.Value() < end.Value());
LiveRange* second_part = SplitRangeAt(range, start);
if (!AllocationOk()) return;
@@ -2124,7 +2144,7 @@ void LAllocator::SpillBetween(LiveRange* range,
// and put the rest to unhandled.
LiveRange* third_part = SplitBetween(
second_part,
- second_part->Start().InstructionEnd(),
+ Max(second_part->Start().InstructionEnd(), until),
end.PrevInstruction().InstructionEnd());
if (!AllocationOk()) return;
diff --git a/deps/v8/src/lithium-allocator.h b/deps/v8/src/lithium-allocator.h
index 70f3182be..8b45531d9 100644
--- a/deps/v8/src/lithium-allocator.h
+++ b/deps/v8/src/lithium-allocator.h
@@ -536,11 +536,18 @@ class LAllocator BASE_EMBEDDED {
// Spill the given life range after position pos.
void SpillAfter(LiveRange* range, LifetimePosition pos);
- // Spill the given life range after position start and up to position end.
+ // Spill the given life range after position [start] and up to position [end].
void SpillBetween(LiveRange* range,
LifetimePosition start,
LifetimePosition end);
+ // Spill the given life range after position [start] and up to position [end].
+ // Range is guaranteed to be spilled at least until position [until].
+ void SpillBetweenUntil(LiveRange* range,
+ LifetimePosition start,
+ LifetimePosition until,
+ LifetimePosition end);
+
void SplitAndSpillIntersecting(LiveRange* range);
// If we are trying to spill a range inside the loop try to
@@ -625,6 +632,10 @@ class LAllocator BASE_EMBEDDED {
// Indicates success or failure during register allocation.
bool allocation_ok_;
+#ifdef DEBUG
+ LifetimePosition allocation_finger_;
+#endif
+
DISALLOW_COPY_AND_ASSIGN(LAllocator);
};
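SpillBetweenUntil, introduced above, clamps the position at which a spilled range is re-queued so it never precedes the start of the range currently being allocated. A tiny standalone sketch of that clamping (not V8 code; positions are plain integers here):

// The respill split point is never allowed to precede the start of the range
// being allocated, preserving the "process ranges in start order" invariant.
#include <algorithm>
#include <cstdio>

int main() {
  int second_part_start = 10;  // start of the spilled piece (assumed units)
  int current_start = 25;      // start of the range being allocated now
  int next_use = 40;           // next position that needs a register

  // SpillBetween used second_part_start directly; SpillBetweenUntil clamps it.
  int split_pos = std::max(second_part_start, current_start);
  printf("respill window: [%d, %d)\n", split_pos, next_use);
  return 0;
}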
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index 58c846a88..b28cd3e87 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -609,7 +609,7 @@ static void CompileScriptForTracker(Isolate* isolate, Handle<Script> script) {
CompilationInfoWithZone info(script);
info.MarkAsGlobal();
// Parse and don't allow skipping lazy functions.
- if (ParserApi::Parse(&info, kNoParsingFlags)) {
+ if (Parser::Parse(&info)) {
// Compile the code.
LiveEditFunctionTracker tracker(info.isolate(), info.function());
if (Compiler::MakeCodeForLiveEdit(&info)) {
@@ -1435,8 +1435,8 @@ class RelocInfoBuffer {
// Copy the data.
int curently_used_size =
static_cast<int>(buffer_ + buffer_size_ - reloc_info_writer_.pos());
- memmove(new_buffer + new_buffer_size - curently_used_size,
- reloc_info_writer_.pos(), curently_used_size);
+ OS::MemMove(new_buffer + new_buffer_size - curently_used_size,
+ reloc_info_writer_.pos(), curently_used_size);
reloc_info_writer_.Reposition(
new_buffer + new_buffer_size - curently_used_size,
@@ -1488,7 +1488,7 @@ static Handle<Code> PatchPositionsInCode(
if (buffer.length() == code->relocation_size()) {
// Simply patch relocation area of code.
- memcpy(code->relocation_start(), buffer.start(), buffer.length());
+ OS::MemCopy(code->relocation_start(), buffer.start(), buffer.length());
return code;
} else {
// Relocation info section now has different size. We cannot simply
@@ -1761,9 +1761,9 @@ static const char* DropFrames(Vector<StackFrame*> frames,
StackFrame* pre_pre_frame = frames[top_frame_index - 2];
- memmove(padding_start + kPointerSize - shortage_bytes,
- padding_start + kPointerSize,
- Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
+ OS::MemMove(padding_start + kPointerSize - shortage_bytes,
+ padding_start + kPointerSize,
+ Debug::FramePaddingLayout::kFrameBaseSize * kPointerSize);
pre_top_frame->UpdateFp(pre_top_frame->fp() - shortage_bytes);
pre_pre_frame->SetCallerFp(pre_top_frame->fp());
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index 45ac403a6..cef7dbab2 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -167,8 +167,9 @@ void Log::OpenFile(const char* name) {
// Open the low-level log file.
size_t len = strlen(name);
ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLowLevelLogExt)));
- memcpy(ll_name.start(), name, len);
- memcpy(ll_name.start() + len, kLowLevelLogExt, sizeof(kLowLevelLogExt));
+ OS::MemCopy(ll_name.start(), name, len);
+ OS::MemCopy(ll_name.start() + len,
+ kLowLevelLogExt, sizeof(kLowLevelLogExt));
ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
setvbuf(ll_output_handle_, NULL, _IOFBF, kLowLevelLogBufferSize);
}
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index 94385373c..153889464 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -112,45 +112,6 @@ class Profiler: public Thread {
//
-// StackTracer implementation
-//
-DISABLE_ASAN void StackTracer::Trace(Isolate* isolate, TickSample* sample) {
- ASSERT(isolate->IsInitialized());
-
- // Avoid collecting traces while doing GC.
- if (sample->state == GC) return;
-
- const Address js_entry_sp =
- Isolate::js_entry_sp(isolate->thread_local_top());
- if (js_entry_sp == 0) {
- // Not executing JS now.
- return;
- }
-
- const Address callback = isolate->external_callback();
- if (callback != NULL) {
- sample->external_callback = callback;
- sample->has_external_callback = true;
- } else {
- // Sample potential return address value for frameless invocation of
- // stubs (we'll figure out later, if this value makes sense).
- sample->tos = Memory::Address_at(sample->sp);
- sample->has_external_callback = false;
- }
-
- SafeStackTraceFrameIterator it(isolate,
- sample->fp, sample->sp,
- sample->sp, js_entry_sp);
- int i = 0;
- while (!it.done() && i < TickSample::kMaxFramesCount) {
- sample->stack[i++] = it.frame()->pc();
- it.Advance();
- }
- sample->frames_count = i;
-}
-
-
-//
// Ticker used to provide ticks to the profiler and the sliding state
// window.
//
@@ -179,11 +140,6 @@ class Ticker: public Sampler {
if (IsActive()) Stop();
}
- protected:
- virtual void DoSampleStack(TickSample* sample) {
- StackTracer::Trace(isolate(), sample);
- }
-
private:
Profiler* profiler_;
};
@@ -216,9 +172,10 @@ void Profiler::Engage() {
Start();
// Register to get ticks.
- LOGGER->ticker_->SetProfiler(this);
+ Logger* logger = isolate_->logger();
+ logger->ticker_->SetProfiler(this);
- LOGGER->ProfilerBeginEvent();
+ logger->ProfilerBeginEvent();
}
@@ -226,7 +183,7 @@ void Profiler::Disengage() {
if (!engaged_) return;
// Stop receiving ticks.
- LOGGER->ticker_->ClearProfiler();
+ isolate_->logger()->ticker_->ClearProfiler();
// Terminate the worker thread by setting running_ to false,
// inserting a fake element in the queue and then wait for
@@ -406,7 +363,7 @@ class Logger::NameBuffer {
void AppendBytes(const char* bytes, int size) {
size = Min(size, kUtf8BufferSize - utf8_pos_);
- memcpy(utf8_buffer_ + utf8_pos_, bytes, size);
+ OS::MemCopy(utf8_buffer_ + utf8_pos_, bytes, size);
utf8_pos_ += size;
}
@@ -778,11 +735,10 @@ void Logger::RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache) {
}
-void Logger::LogRuntime(Isolate* isolate,
- Vector<const char> format,
+void Logger::LogRuntime(Vector<const char> format,
JSArray* args) {
if (!log_->IsEnabled() || !FLAG_log_runtime) return;
- HandleScope scope(isolate);
+ HandleScope scope(isolate_);
LogMessageBuilder msg(this);
for (int i = 0; i < format.length(); i++) {
char c = format[i];
@@ -899,12 +855,12 @@ void Logger::DeleteEvent(const char* name, void* object) {
void Logger::NewEventStatic(const char* name, void* object, size_t size) {
- LOGGER->NewEvent(name, object, size);
+ Isolate::Current()->logger()->NewEvent(name, object, size);
}
void Logger::DeleteEventStatic(const char* name, void* object) {
- LOGGER->DeleteEvent(name, object);
+ Isolate::Current()->logger()->DeleteEvent(name, object);
}
void Logger::CallbackEventInternal(const char* prefix, Name* name,
@@ -1492,13 +1448,7 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.Append(',');
msg.AppendAddress(sample->sp);
msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
- if (sample->has_external_callback) {
- msg.Append(",1,");
- msg.AppendAddress(sample->external_callback);
- } else {
- msg.Append(",0,");
- msg.AppendAddress(sample->tos);
- }
+ msg.AppendAddress(sample->external_callback);
msg.Append(",%d", static_cast<int>(sample->state));
if (overflow) {
msg.Append(",overflow");
@@ -1560,11 +1510,6 @@ void Logger::LogFailure() {
}
-bool Logger::IsProfilerSamplerActive() {
- return ticker_->IsActive();
-}
-
-
class EnumerateOptimizedFunctionsVisitor: public OptimizedFunctionVisitor {
public:
EnumerateOptimizedFunctionsVisitor(Handle<SharedFunctionInfo>* sfis,
@@ -1937,17 +1882,6 @@ Sampler* Logger::sampler() {
}
-void Logger::EnsureTickerStarted() {
- ASSERT(ticker_ != NULL);
- if (!ticker_->IsActive()) ticker_->Start();
-}
-
-
-void Logger::EnsureTickerStopped() {
- if (ticker_ != NULL && ticker_->IsActive()) ticker_->Stop();
-}
-
-
FILE* Logger::TearDown() {
if (!is_initialized_) return NULL;
is_initialized_ = false;
@@ -1965,65 +1899,4 @@ FILE* Logger::TearDown() {
return log_->Close();
}
-
-// Protects the state below.
-static Mutex* active_samplers_mutex = NULL;
-
-List<Sampler*>* SamplerRegistry::active_samplers_ = NULL;
-
-
-void SamplerRegistry::SetUp() {
- if (!active_samplers_mutex) {
- active_samplers_mutex = OS::CreateMutex();
- }
-}
-
-
-bool SamplerRegistry::IterateActiveSamplers(VisitSampler func, void* param) {
- ScopedLock lock(active_samplers_mutex);
- for (int i = 0;
- ActiveSamplersExist() && i < active_samplers_->length();
- ++i) {
- func(active_samplers_->at(i), param);
- }
- return ActiveSamplersExist();
-}
-
-
-static void ComputeCpuProfiling(Sampler* sampler, void* flag_ptr) {
- bool* flag = reinterpret_cast<bool*>(flag_ptr);
- *flag |= sampler->IsProfiling();
-}
-
-
-SamplerRegistry::State SamplerRegistry::GetState() {
- bool flag = false;
- if (!IterateActiveSamplers(&ComputeCpuProfiling, &flag)) {
- return HAS_NO_SAMPLERS;
- }
- return flag ? HAS_CPU_PROFILING_SAMPLERS : HAS_SAMPLERS;
-}
-
-
-void SamplerRegistry::AddActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- if (active_samplers_ == NULL) {
- active_samplers_ = new List<Sampler*>;
- } else {
- ASSERT(!active_samplers_->Contains(sampler));
- }
- active_samplers_->Add(sampler);
-}
-
-
-void SamplerRegistry::RemoveActiveSampler(Sampler* sampler) {
- ASSERT(sampler->IsActive());
- ScopedLock lock(active_samplers_mutex);
- ASSERT(active_samplers_ != NULL);
- bool removed = active_samplers_->RemoveElement(sampler);
- ASSERT(removed);
- USE(removed);
-}
-
} } // namespace v8::internal
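The logging changes above drop the process-wide LOGGER macro (and the global SamplerRegistry/StackTracer) in favor of reaching the logger through its owning isolate. A minimal sketch of that shape (not V8 code; the classes are reduced to stubs):

// Explicitly passing the isolate keeps multi-isolate embeddings unambiguous,
// instead of resolving "the current isolate" through a global macro.
#include <cstdio>

class Logger {
 public:
  void NewEvent(const char* name) { printf("new %s\n", name); }
};

class Isolate {
 public:
  Logger* logger() { return &logger_; }
 private:
  Logger logger_;
};

void RecordAllocation(Isolate* isolate, const char* what) {
  isolate->logger()->NewEvent(what);  // per-isolate, not a global LOGGER
}

int main() {
  Isolate a, b;
  RecordAllocation(&a, "CodeRange");
  RecordAllocation(&b, "CodeRange");
  return 0;
}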
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index a5eddc7a3..26833302a 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -74,6 +74,7 @@ namespace internal {
class LogMessageBuilder;
class Profiler;
class Semaphore;
+struct TickSample;
class Ticker;
class Isolate;
class PositionsRecorder;
@@ -168,9 +169,6 @@ class Logger {
void SetCodeEventHandler(uint32_t options,
JitCodeEventHandler event_handler);
- void EnsureTickerStarted();
- void EnsureTickerStopped();
-
Sampler* sampler();
// Frees resources acquired in SetUp.
@@ -327,7 +325,7 @@ class Logger {
void RegExpCompileEvent(Handle<JSRegExp> regexp, bool in_cache);
// Log an event reported from generated code
- void LogRuntime(Isolate* isolate, Vector<const char> format, JSArray* args);
+ void LogRuntime(Vector<const char> format, JSArray* args);
bool is_logging() {
return logging_nesting_ > 0;
@@ -448,9 +446,6 @@ class Logger {
void UncheckedIntEvent(const char* name, int value);
void UncheckedIntPtrTEvent(const char* name, intptr_t value);
- // Returns whether profiler's sampler is active.
- bool IsProfilerSamplerActive();
-
Isolate* isolate_;
// The sampler used by the profiler and the sliding state window.
@@ -471,7 +466,6 @@ class Logger {
friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
- friend class StackTracer;
friend class VMState;
friend class LoggerTestHelper;
@@ -510,46 +504,6 @@ class Logger {
};
-// Process wide registry of samplers.
-class SamplerRegistry : public AllStatic {
- public:
- enum State {
- HAS_NO_SAMPLERS,
- HAS_SAMPLERS,
- HAS_CPU_PROFILING_SAMPLERS
- };
-
- static void SetUp();
-
- typedef void (*VisitSampler)(Sampler*, void*);
-
- static State GetState();
-
- // Iterates over all active samplers keeping the internal lock held.
- // Returns whether there are any active samplers.
- static bool IterateActiveSamplers(VisitSampler func, void* param);
-
- // Adds/Removes an active sampler.
- static void AddActiveSampler(Sampler* sampler);
- static void RemoveActiveSampler(Sampler* sampler);
-
- private:
- static bool ActiveSamplersExist() {
- return active_samplers_ != NULL && !active_samplers_->is_empty();
- }
-
- static List<Sampler*>* active_samplers_;
-
- DISALLOW_IMPLICIT_CONSTRUCTORS(SamplerRegistry);
-};
-
-
-// Class that extracts stack trace, used for profiling.
-class StackTracer : public AllStatic {
- public:
- static void Trace(Isolate* isolate, TickSample* sample);
-};
-
} } // namespace v8::internal
diff --git a/deps/v8/src/macro-assembler.h b/deps/v8/src/macro-assembler.h
index 55dccfa95..9fdf2ee7d 100644
--- a/deps/v8/src/macro-assembler.h
+++ b/deps/v8/src/macro-assembler.h
@@ -51,7 +51,9 @@ enum AllocationFlags {
// Align the allocation to a multiple of kDoubleSize
DOUBLE_ALIGNMENT = 1 << 3,
// Directly allocate in old pointer space
- PRETENURE_OLD_POINTER_SPACE = 1 << 4
+ PRETENURE_OLD_POINTER_SPACE = 1 << 4,
+ // Directly allocate in old data space
+ PRETENURE_OLD_DATA_SPACE = 1 << 5
};
@@ -175,17 +177,26 @@ class AllocationUtils {
public:
static ExternalReference GetAllocationTopReference(
Isolate* isolate, AllocationFlags flags) {
- return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ?
- ExternalReference::old_pointer_space_allocation_top_address(isolate) :
- ExternalReference::new_space_allocation_top_address(isolate);
+ if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) {
+ return ExternalReference::old_pointer_space_allocation_top_address(
+ isolate);
+ } else if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ return ExternalReference::old_data_space_allocation_top_address(isolate);
+ }
+ return ExternalReference::new_space_allocation_top_address(isolate);
}
static ExternalReference GetAllocationLimitReference(
Isolate* isolate, AllocationFlags flags) {
- return ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) ?
- ExternalReference::old_pointer_space_allocation_limit_address(isolate) :
- ExternalReference::new_space_allocation_limit_address(isolate);
+ if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) {
+ return ExternalReference::old_pointer_space_allocation_limit_address(
+ isolate);
+ } else if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+ return ExternalReference::old_data_space_allocation_limit_address(
+ isolate);
+ }
+ return ExternalReference::new_space_allocation_limit_address(isolate);
}
};
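AllocationUtils above now distinguishes three target spaces from the allocation flags: new space by default, old pointer space, or the newly added old data space. A small standalone sketch of that dispatch (not V8 code; only the flag bit values mirror the patch):

// Selects which space's allocation top/limit would be consulted for a given
// set of pretenuring flags.
#include <cstdio>

enum AllocationFlags {
  NO_ALLOCATION_FLAGS = 0,
  PRETENURE_OLD_POINTER_SPACE = 1 << 4,
  PRETENURE_OLD_DATA_SPACE = 1 << 5
};

const char* TargetSpace(int flags) {
  if ((flags & PRETENURE_OLD_POINTER_SPACE) != 0) return "old pointer space";
  if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) return "old data space";
  return "new space";
}

int main() {
  printf("%s\n", TargetSpace(NO_ALLOCATION_FLAGS));
  printf("%s\n", TargetSpace(PRETENURE_OLD_DATA_SPACE));
  return 0;
}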
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 7503f24cb..f49179f67 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -672,8 +672,8 @@ static int FreeListFragmentation(PagedSpace* space, Page* p) {
return 0;
}
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
+ PagedSpace::SizeStats sizes;
+ space->ObtainFreeListStatistics(p, &sizes);
intptr_t ratio;
intptr_t ratio_threshold;
@@ -812,8 +812,8 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
if (!p->WasSwept()) {
free_bytes = (p->area_size() - p->LiveBytes());
} else {
- FreeList::SizeStats sizes;
- space->CountFreeListItems(p, &sizes);
+ PagedSpace::SizeStats sizes;
+ space->ObtainFreeListStatistics(p, &sizes);
free_bytes = sizes.Total();
}
@@ -3125,8 +3125,6 @@ void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
- Heap::RelocationLock relocation_lock(heap());
-
bool code_slots_filtering_required;
{ GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
code_slots_filtering_required = MarkInvalidatedCode();
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 0e0254102..e5ab70cc1 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Object = global.Object;
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
@@ -35,10 +38,9 @@ var $abs = MathAbs;
// Instance class name can only be set on functions. That is the only
// purpose for MathConstructor.
function MathConstructor() {}
-%FunctionSetInstanceClassName(MathConstructor, 'Math');
var $Math = new MathConstructor();
-%SetPrototype($Math, $Object.prototype);
-%SetProperty(global, "Math", $Math, DONT_ENUM);
+
+// -------------------------------------------------------------------
// ECMA 262 - 15.8.2.1
function MathAbs(x) {
@@ -216,6 +218,11 @@ function MathTan(x) {
function SetUpMath() {
%CheckIsBootstrapping();
+
+ %SetPrototype($Math, $Object.prototype);
+ %SetProperty(global, "Math", $Math, DONT_ENUM);
+ %FunctionSetInstanceClassName(MathConstructor, 'Math');
+
// Set up math constants.
// ECMA-262, section 15.8.1.1.
%OptimizeObjectForAddingMultipleProperties($Math, 8);
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index d51d38475..67fe3ccf1 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -98,9 +98,13 @@ var kMessages = {
observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
observe_notify_non_notifier: ["notify called on non-notifier object"],
+ proto_poison_pill: ["Generic use of __proto__ accessor not allowed"],
// RangeError
invalid_array_length: ["Invalid array length"],
invalid_array_buffer_length: ["Invalid array buffer length"],
+ invalid_typed_array_offset: ["Start offset is too large"],
+ invalid_typed_array_length: ["Length is too large"],
+ invalid_typed_array_alignment: ["%0", "of", "%1", "should be a multiple of", "%3"],
stack_overflow: ["Maximum call stack size exceeded"],
invalid_time_value: ["Invalid time value"],
// SyntaxError
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index 9c9f611ed..d922bfac6 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -81,29 +81,17 @@ bool Operand::is_reg() const {
int Register::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return kMaxNumAllocatableRegisters;
- } else {
- return kMaxNumAllocatableRegisters - kGPRsPerNonFPUDouble;
- }
}
int DoubleRegister::NumRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumRegisters;
- } else {
- return 1;
- }
}
int DoubleRegister::NumAllocatableRegisters() {
- if (CpuFeatures::IsSupported(FPU)) {
return FPURegister::kMaxNumAllocatableRegisters;
- } else {
- return 1;
- }
}
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index c255d0fbd..e36b97f18 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -80,29 +80,24 @@ static uint64_t CpuFeaturesImpliedByCompiler() {
const char* DoubleRegister::AllocationIndexToString(int index) {
- if (CpuFeatures::IsSupported(FPU)) {
- ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
- const char* const names[] = {
- "f0",
- "f2",
- "f4",
- "f6",
- "f8",
- "f10",
- "f12",
- "f14",
- "f16",
- "f18",
- "f20",
- "f22",
- "f24",
- "f26"
- };
- return names[index];
- } else {
- ASSERT(index == 0);
- return "sfpd0";
- }
+ ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+ const char* const names[] = {
+ "f0",
+ "f2",
+ "f4",
+ "f6",
+ "f8",
+ "f10",
+ "f12",
+ "f14",
+ "f16",
+ "f18",
+ "f20",
+ "f22",
+ "f24",
+ "f26"
+ };
+ return names[index];
}
@@ -127,10 +122,8 @@ void CpuFeatures::Probe() {
// If the compiler is allowed to use fpu then we can use fpu too in our
// code generation.
#if !defined(__mips__)
- // For the simulator=mips build, use FPU when FLAG_enable_fpu is enabled.
- if (FLAG_enable_fpu) {
- supported_ |= static_cast<uint64_t>(1) << FPU;
- }
+ // For the simulator build, use FPU.
+ supported_ |= static_cast<uint64_t>(1) << FPU;
#else
// Probe for additional features not already known to be available.
if (OS::MipsCpuHasFeature(FPU)) {
@@ -876,7 +869,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
| (fd.code() << kFdShift) | func;
emit(instr);
@@ -890,7 +882,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
@@ -904,7 +895,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPURegister fd,
SecondaryField func) {
ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | fmt | (rt.code() << kRtShift)
| (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
emit(instr);
@@ -917,7 +907,6 @@ void Assembler::GenInstrRegister(Opcode opcode,
FPUControlRegister fs,
SecondaryField func) {
ASSERT(fs.is_valid() && rt.is_valid());
- ASSERT(IsEnabled(FPU));
Instr instr =
opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
emit(instr);
@@ -952,7 +941,6 @@ void Assembler::GenInstrImmediate(Opcode opcode,
FPURegister ft,
int32_t j) {
ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
- ASSERT(IsEnabled(FPU));
Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
| (j & kImm16Mask);
emit(instr);
@@ -1679,7 +1667,7 @@ void Assembler::cfc1(Register rt, FPUControlRegister fs) {
void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
- memcpy(&i, &d, 8);
+ OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@@ -1874,7 +1862,6 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
ASSERT((fmt & ~(31 << kRsShift)) == 0);
Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
@@ -1885,7 +1872,6 @@ void Assembler::c(FPUCondition cond, SecondaryField fmt,
void Assembler::fcmp(FPURegister src1, const double src2,
FPUCondition cond) {
- ASSERT(IsEnabled(FPU));
ASSERT(src2 == 0.0);
mtc1(zero_reg, f14);
cvt_d_w(f14, f14);
@@ -1894,7 +1880,6 @@ void Assembler::fcmp(FPURegister src1, const double src2,
void Assembler::bc1f(int16_t offset, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1902,7 +1887,6 @@ void Assembler::bc1f(int16_t offset, uint16_t cc) {
void Assembler::bc1t(int16_t offset, uint16_t cc) {
- ASSERT(IsEnabled(FPU));
ASSERT(is_uint3(cc));
Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
emit(instr);
@@ -1997,9 +1981,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(reloc_info_writer.pos() + rc_delta,
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 9d3d39b83..d12c0dace 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -74,7 +74,6 @@ struct Register {
static const int kNumRegisters = v8::internal::kNumRegisters;
static const int kMaxNumAllocatableRegisters = 14; // v0 through t7.
static const int kSizeInBytes = 4;
- static const int kGPRsPerNonFPUDouble = 2;
inline static int NumAllocatableRegisters();
@@ -300,9 +299,6 @@ const FPURegister f29 = { 29 };
const FPURegister f30 = { 30 };
const FPURegister f31 = { 31 };
-const Register sfpd_lo = { kRegister_t6_Code };
-const Register sfpd_hi = { kRegister_t7_Code };
-
// Register aliases.
// cp is assumed to be a callee saved register.
// Defined using #define instead of "static const Register&" because Clang
@@ -403,7 +399,6 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- if (f == FPU && !FLAG_enable_fpu) return false;
return (supported_ & (1u << f)) != 0;
}
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index 37d7720c8..cbe204807 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_MIPS)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "codegen.h"
#include "regexp-macro-assembler.h"
@@ -39,6 +40,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { a3, a2, a1 };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
+
+
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -134,7 +147,6 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Label* rhs_not_nan,
Label* slow,
bool strict);
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc);
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs);
@@ -183,9 +195,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
__ IncrementCounter(counters->fast_new_closure_total(), 1, t2, t3);
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+ int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -402,147 +412,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- // a3: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
-
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- __ Allocate(size, v0, a1, a2, fail, TAG_OBJECT);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()->
- allocation_site_info_map())));
- __ sw(a2, FieldMemOperand(v0, allocation_info_start));
- __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize));
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ lw(a1, FieldMemOperand(a3, i));
- __ sw(a1, FieldMemOperand(v0, i));
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ Addu(a2, v0, Operand(JSArray::kSize));
- }
- __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset));
-
- // Copy the elements array.
- ASSERT((elements_size % kPointerSize) == 0);
- __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize);
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [sp]: constant elements.
- // [sp + kPointerSize]: literal index.
- // [sp + (2 * kPointerSize)]: literals array.
-
- // Load boilerplate object into r3 and check if we need to create a
- // boilerplate.
- Label slow_case;
- __ lw(a3, MemOperand(sp, 2 * kPointerSize));
- __ lw(a0, MemOperand(sp, 1 * kPointerSize));
- __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(t0, a3, t0);
- __ lw(a3, MemOperand(t0));
- __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
- __ Branch(&slow_case, eq, a3, Operand(t1));
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
- __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex);
- __ Branch(&check_fast_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&check_fast_elements);
- __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
- __ Branch(&double_elements, ne, v0, Operand(t1));
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(a3);
- __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset));
- __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset));
- __ LoadRoot(at, expected_map_index);
- __ Assert(eq, message, a3, Operand(at));
- __ pop(a3);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
-
- // Return and remove the on-stack parameters.
- __ DropAndRet(3);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
// Takes a Smi and converts to an IEEE 64 bit floating point value in two
// registers. The format is 1 sign bit, 11 exponent bits (biased 1023) and
// 52 fraction bits (20 in the first word, 32 in the second). Zeros is a
@@ -646,30 +515,15 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register scratch1,
Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(scratch1, a0, kSmiTagSize);
- __ mtc1(scratch1, f14);
- __ cvt_d_w(f14, f14);
- __ sra(scratch1, a1, kSmiTagSize);
- __ mtc1(scratch1, f12);
- __ cvt_d_w(f12, f12);
- if (destination == kCoreRegisters) {
- __ Move(a2, a3, f14);
- __ Move(a0, a1, f12);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write Smi from a0 to a3 and a2 in double format.
- __ mov(scratch1, a0);
- ConvertToDoubleStub stub1(a3, a2, scratch1, scratch2);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
- // Write Smi from a1 to a1 and a0 in double format.
- __ mov(scratch1, a1);
- ConvertToDoubleStub stub2(a1, a0, scratch1, scratch2);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
+ __ sra(scratch1, a0, kSmiTagSize);
+ __ mtc1(scratch1, f14);
+ __ cvt_d_w(f14, f14);
+ __ sra(scratch1, a1, kSmiTagSize);
+ __ mtc1(scratch1, f12);
+ __ cvt_d_w(f12, f12);
+ if (destination == kCoreRegisters) {
+ __ Move(a2, a3, f14);
+ __ Move(a0, a1, f12);
}
}
@@ -684,8 +538,6 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Label* not_number) {
- ASSERT(!object.is(dst1) && !object.is(dst2));
-
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
@@ -698,9 +550,7 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
// Handle loading a double from a heap number.
- if (CpuFeatures::IsSupported(FPU) &&
- destination == kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
+ if (destination == kFPURegisters) {
// Load the double from tagged HeapNumber to double register.
// ARM uses a workaround here because of the unaligned HeapNumber
@@ -718,25 +568,13 @@ void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
// Handle loading a double from a smi.
__ bind(&is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Convert smi to double using FPU instructions.
- __ mtc1(scratch1, dst);
- __ cvt_d_w(dst, dst);
- if (destination == kCoreRegisters) {
- // Load the converted smi to dst1 and dst2 in double format.
- __ Move(dst1, dst2, dst);
- }
- } else {
- ASSERT(destination == kCoreRegisters);
- // Write smi to dst1 and dst2 double format.
- __ mov(scratch1, object);
- ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
- __ push(ra);
- __ Call(stub.GetCode(masm->isolate()));
- __ pop(ra);
+ // Convert smi to double using FPU instructions.
+ __ mtc1(scratch1, dst);
+ __ cvt_d_w(dst, dst);
+ if (destination == kCoreRegisters) {
+ // Load the converted smi to dst1 and dst2 in double format.
+ __ Move(dst1, dst2, dst);
}
-
__ bind(&done);
}
@@ -792,74 +630,11 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
ASSERT(!int_scratch.is(dst_mantissa));
ASSERT(!int_scratch.is(dst_exponent));
- Label done;
-
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(int_scratch, single_scratch);
- __ cvt_d_w(double_dst, single_scratch);
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- Label fewer_than_20_useful_bits;
- // Expected output:
- // | dst_exponent | dst_mantissa |
- // | s | exp | mantissa |
-
- // Check for zero.
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
- __ Branch(&done, eq, int_scratch, Operand(zero_reg));
-
- // Preload the sign of the value.
- __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
- // Get the absolute value of the object (as an unsigned integer).
- Label skip_sub;
- __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
- __ Subu(int_scratch, zero_reg, int_scratch);
- __ bind(&skip_sub);
-
- // Get mantissa[51:20].
-
- // Get the position of the first set bit.
- __ Clz(dst_mantissa, int_scratch);
- __ li(scratch2, 31);
- __ Subu(dst_mantissa, scratch2, dst_mantissa);
-
- // Set the exponent.
- __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Ins(dst_exponent, scratch2,
- HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-
- // Clear the first non null bit.
- __ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst_mantissa);
- __ li(at, -1);
- __ Xor(scratch2, scratch2, at);
- __ And(int_scratch, int_scratch, scratch2);
-
- // Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst_mantissa,
- Operand(HeapNumber::kMantissaBitsInTopWord));
- __ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
- // Set the higher 20 bits of the mantissa.
- __ srlv(at, int_scratch, scratch2);
- __ or_(dst_exponent, dst_exponent, at);
- __ li(at, 32);
- __ subu(scratch2, at, scratch2);
- __ sllv(dst_mantissa, int_scratch, scratch2);
- __ Branch(&done);
-
- __ bind(&fewer_than_20_useful_bits);
- __ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst_mantissa);
- __ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst_exponent, dst_exponent, scratch2);
- // Set dst_mantissa to 0.
- __ mov(dst_mantissa, zero_reg);
+ __ mtc1(int_scratch, single_scratch);
+ __ cvt_d_w(double_dst, single_scratch);
+ if (destination == kCoreRegisters) {
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
- __ bind(&done);
}
@@ -880,10 +655,6 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
ASSERT(!heap_number_map.is(object) &&
!heap_number_map.is(scratch1) &&
!heap_number_map.is(scratch2));
- // ARM uses pop/push and Ldlr to save dst_* and probably object registers in
- // softfloat path. On MIPS there is no ldlr, 1st lw instruction may overwrite
- // object register making the 2nd lw invalid.
- ASSERT(!object.is(dst_mantissa) && !object.is(dst_exponent));
Label done, obj_is_not_smi;
@@ -900,46 +671,23 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Load the number.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Load the double value.
- __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- scratch1,
- double_dst,
- at,
- double_scratch,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
-
- if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
- }
- } else {
- // Load the double value in the destination registers.
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst_mantissa));
- __ Branch(&done, eq, scratch1, Operand(zero_reg));
-
- // Check that the value can be exactly represented by a 32-bit integer.
- // Jump to not_int32 if that's not the case.
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- not_int32);
-
- // dst_* were trashed. Reload the double value.
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ // Load the double value.
+ __ ldc1(double_dst, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ scratch1,
+ double_dst,
+ at,
+ double_scratch,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ if (destination == kCoreRegisters) {
+ __ Move(dst_mantissa, dst_exponent, double_dst);
}
-
__ bind(&done);
}
@@ -972,53 +720,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
-
- Register except_flag = scratch2;
- __ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
- scratch1,
- double_scratch1,
- except_flag,
- kCheckForInexactConversion);
-
- // Jump to not_int32 if the operation did not succeed.
- __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
- } else {
- // Load the double value in the destination registers.
- __ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
-
- // Check for 0 and -0.
- __ And(dst, scratch1, Operand(~HeapNumber::kSignMask));
- __ Or(dst, scratch2, Operand(dst));
- __ Branch(&done, eq, dst, Operand(zero_reg));
-
- DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32);
-
- // Registers state after DoubleIs32BitInteger.
- // dst: mantissa[51:20].
- // scratch2: 1
-
- // Shift back the higher bits of the mantissa.
- __ srlv(dst, dst, scratch3);
- // Set the implicit first bit.
- __ li(at, 32);
- __ subu(scratch3, at, scratch3);
- __ sllv(scratch2, scratch2, scratch3);
- __ Or(dst, dst, scratch2);
- // Set the sign.
- __ lw(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
- Label skip_sub;
- __ Branch(&skip_sub, ge, scratch1, Operand(zero_reg));
- __ Subu(dst, zero_reg, dst);
- __ bind(&skip_sub);
- }
+ // Load the double value.
+ __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+
+ Register except_flag = scratch2;
+ __ EmitFPUTruncate(kRoundToZero,
+ dst,
+ double_scratch0,
+ scratch1,
+ double_scratch1,
+ except_flag,
+ kCheckForInexactConversion);
+
+ // Jump to not_int32 if the operation did not succeed.
+ __ Branch(not_int32, ne, except_flag, Operand(zero_reg));
__ Branch(&done);
__ bind(&maybe_undefined);
@@ -1032,66 +747,6 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
}
-void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
- Register dst,
- Register scratch,
- Label* not_int32) {
- // Get exponent alone in scratch.
- __ Ext(scratch,
- src_exponent,
- HeapNumber::kExponentShift,
- HeapNumber::kExponentBits);
-
- // Substract the bias from the exponent.
- __ Subu(scratch, scratch, Operand(HeapNumber::kExponentBias));
-
- // src1: higher (exponent) part of the double value.
- // src2: lower (mantissa) part of the double value.
- // scratch: unbiased exponent.
-
- // Fast cases. Check for obvious non 32-bit integer values.
- // Negative exponent cannot yield 32-bit integers.
- __ Branch(not_int32, lt, scratch, Operand(zero_reg));
- // Exponent greater than 31 cannot yield 32-bit integers.
- // Also, a positive value with an exponent equal to 31 is outside of the
- // signed 32-bit integer range.
- // Another way to put it is that if (exponent - signbit) > 30 then the
- // number cannot be represented as an int32.
- Register tmp = dst;
- __ srl(at, src_exponent, 31);
- __ subu(tmp, scratch, at);
- __ Branch(not_int32, gt, tmp, Operand(30));
- // - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src_mantissa, 0x3fffff);
- __ Branch(not_int32, ne, tmp, Operand(zero_reg));
-
- // Otherwise the exponent needs to be big enough to shift left all the
- // non zero bits left. So we need the (30 - exponent) last bits of the
- // 31 higher bits of the mantissa to be null.
- // Because bits [21:0] are null, we can check instead that the
- // (32 - exponent) last bits of the 32 higher bits of the mantissa are null.
-
- // Get the 32 higher bits of the mantissa in dst.
- __ Ext(dst,
- src_mantissa,
- HeapNumber::kMantissaBitsInTopWord,
- 32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ or_(dst, dst, at);
-
- // Create the mask and test the lower bits (of the higher bits).
- __ li(at, 32);
- __ subu(scratch, at, scratch);
- __ li(src_mantissa, 1);
- __ sllv(src_exponent, src_mantissa, scratch);
- __ Subu(src_exponent, src_exponent, Operand(1));
- __ And(src_exponent, dst, src_exponent);
- __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
-}
-
-
void FloatingPointHelper::CallCCodeForDoubleOperation(
MacroAssembler* masm,
Token::Value op,
@@ -1111,7 +766,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
__ push(ra);
__ PrepareCallCFunction(4, scratch); // Two doubles are 4 arguments.
if (!IsMipsSoftFloatABI) {
- CpuFeatureScope scope(masm, FPU);
// We are not using MIPS FPU instructions, and parameters for the runtime
// function call are prepared in a0-a3 registers, but the function we are
// calling is compiled with hard-float flag and expecting hard float ABI
@@ -1127,7 +781,6 @@ void FloatingPointHelper::CallCCodeForDoubleOperation(
}
// Store answer in the overwritable heap number.
if (!IsMipsSoftFloatABI) {
- CpuFeatureScope scope(masm, FPU);
// Double returned in register f0.
__ sdc1(f0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
} else {
@@ -1350,25 +1003,10 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Rhs is a smi, lhs is a number.
// Convert smi rhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(at, rhs, kSmiTagSize);
- __ mtc1(at, f14);
- __ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- } else {
- // Load lhs to a double in a2, a3.
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
-
- // Write Smi from rhs to a1 and a0 in double format. t5 is scratch.
- __ mov(t6, rhs);
- ConvertToDoubleStub stub1(a1, a0, t6, t5);
- __ push(ra);
- __ Call(stub1.GetCode(masm->isolate()));
-
- __ pop(ra);
- }
+ __ sra(at, rhs, kSmiTagSize);
+ __ mtc1(at, f14);
+ __ cvt_d_w(f14, f14);
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
@@ -1389,179 +1027,14 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
// Lhs is a smi, rhs is a number.
// Convert smi lhs to double.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ sra(at, lhs, kSmiTagSize);
- __ mtc1(at, f12);
- __ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- // Convert lhs to a double format. t5 is scratch.
- __ mov(t6, lhs);
- ConvertToDoubleStub stub2(a3, a2, t6, t5);
- __ push(ra);
- __ Call(stub2.GetCode(masm->isolate()));
- __ pop(ra);
- // Load rhs to a double in a1, a0.
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ sra(at, lhs, kSmiTagSize);
+ __ mtc1(at, f12);
+ __ cvt_d_w(f12, f12);
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
-void EmitNanCheck(MacroAssembler* masm, Condition cc) {
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
- Label one_is_nan, neither_is_nan;
- Label lhs_not_nan_exp_mask_is_loaded;
-
- Register exp_mask_reg = t4;
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ and_(t5, lhs_exponent, exp_mask_reg);
- __ Branch(&lhs_not_nan_exp_mask_is_loaded, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, lhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&one_is_nan, ne, lhs_mantissa, Operand(zero_reg));
-
- __ li(exp_mask_reg, HeapNumber::kExponentMask);
- __ bind(&lhs_not_nan_exp_mask_is_loaded);
- __ and_(t5, rhs_exponent, exp_mask_reg);
-
- __ Branch(&neither_is_nan, ne, t5, Operand(exp_mask_reg));
-
- __ sll(t5, rhs_exponent, HeapNumber::kNonMantissaBitsInTopWord);
- __ Branch(&one_is_nan, ne, t5, Operand(zero_reg));
-
- __ Branch(&neither_is_nan, eq, rhs_mantissa, Operand(zero_reg));
-
- __ bind(&one_is_nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
-
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
-
- __ bind(&neither_is_nan);
-}
-
-
-static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) {
- // f12 and f14 have the two doubles. Neither is a NaN.
- // Call a native function to do a comparison between two non-NaNs.
- // Call C routine that may not cause GC or other trouble.
- // We use a call_was and return manually because we need arguments slots to
- // be freed.
-
- Label return_result_not_equal, return_result_equal;
- if (cc == eq) {
- // Doubles are not equal unless they have the same bit pattern.
- // Exception: 0 and -0.
- bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Lhs and rhs are already loaded to f12 and f14 register pairs.
- __ Move(t0, t1, f14);
- __ Move(t2, t3, f12);
- } else {
- // Lhs and rhs are already loaded to GP registers.
- __ mov(t0, a0); // a0 has LS 32 bits of rhs.
- __ mov(t1, a1); // a1 has MS 32 bits of rhs.
- __ mov(t2, a2); // a2 has LS 32 bits of lhs.
- __ mov(t3, a3); // a3 has MS 32 bits of lhs.
- }
- Register rhs_exponent = exp_first ? t0 : t1;
- Register lhs_exponent = exp_first ? t2 : t3;
- Register rhs_mantissa = exp_first ? t1 : t0;
- Register lhs_mantissa = exp_first ? t3 : t2;
-
- __ xor_(v0, rhs_mantissa, lhs_mantissa);
- __ Branch(&return_result_not_equal, ne, v0, Operand(zero_reg));
-
- __ subu(v0, rhs_exponent, lhs_exponent);
- __ Branch(&return_result_equal, eq, v0, Operand(zero_reg));
- // 0, -0 case.
- __ sll(rhs_exponent, rhs_exponent, kSmiTagSize);
- __ sll(lhs_exponent, lhs_exponent, kSmiTagSize);
- __ or_(t4, rhs_exponent, lhs_exponent);
- __ or_(t4, t4, rhs_mantissa);
-
- __ Branch(&return_result_not_equal, ne, t4, Operand(zero_reg));
-
- __ bind(&return_result_equal);
-
- __ li(v0, Operand(EQUAL));
- __ Ret();
- }
-
- __ bind(&return_result_not_equal);
-
- if (!CpuFeatures::IsSupported(FPU)) {
- __ push(ra);
- __ PrepareCallCFunction(0, 2, t4);
- if (!IsMipsSoftFloatABI) {
- // We are not using MIPS FPU instructions, and parameters for the runtime
- // function call are prepaired in a0-a3 registers, but function we are
- // calling is compiled with hard-float flag and expecting hard float ABI
- // (parameters in f12/f14 registers). We need to copy parameters from
- // a0-a3 registers to f12/f14 register pairs.
- __ Move(f12, a0, a1);
- __ Move(f14, a2, a3);
- }
-
- AllowExternalCallThatCantCauseGC scope(masm);
- __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()),
- 0, 2);
- __ pop(ra); // Because this function returns int, result is in v0.
- __ Ret();
- } else {
- CpuFeatureScope scope(masm, FPU);
- Label equal, less_than;
- __ BranchF(&equal, NULL, eq, f12, f14);
- __ BranchF(&less_than, NULL, lt, f12, f14);
-
- // Not equal, not less, not NaN, must be greater.
-
- __ li(v0, Operand(GREATER));
- __ Ret();
-
- __ bind(&equal);
- __ li(v0, Operand(EQUAL));
- __ Ret();
-
- __ bind(&less_than);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
-}
-
-
static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
Register lhs,
Register rhs) {
@@ -1616,21 +1089,9 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a2, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(lhs, HeapNumber::kValueOffset + 4));
- if (rhs.is(a0)) {
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- } else {
- __ lw(a0, FieldMemOperand(rhs, HeapNumber::kValueOffset));
- __ lw(a1, FieldMemOperand(rhs, HeapNumber::kValueOffset + 4));
- }
- }
+ __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
__ jmp(both_loaded_as_doubles);
}
@@ -1711,42 +1172,34 @@ void NumberToStringStub::GenerateLookupNumberStringCache(MacroAssembler* masm,
Label load_result_from_cache;
if (!object_is_smi) {
__ JumpIfSmi(object, &is_smi);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ CheckMap(object,
- scratch1,
- Heap::kHeapNumberMapRootIndex,
- not_found,
- DONT_DO_SMI_CHECK);
-
- STATIC_ASSERT(8 == kDoubleSize);
- __ Addu(scratch1,
- object,
- Operand(HeapNumber::kValueOffset - kHeapObjectTag));
- __ lw(scratch2, MemOperand(scratch1, kPointerSize));
- __ lw(scratch1, MemOperand(scratch1, 0));
- __ Xor(scratch1, scratch1, Operand(scratch2));
- __ And(scratch1, scratch1, Operand(mask));
-
- // Calculate address of entry in string cache: each entry consists
- // of two pointer sized fields.
- __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
- __ Addu(scratch1, number_string_cache, scratch1);
-
- Register probe = mask;
- __ lw(probe,
- FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ JumpIfSmi(probe, not_found);
- __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
- __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
- __ Branch(not_found);
- } else {
- // Note that there is no cache check for non-FPU case, even though
- // it seems there could be. May be a tiny opimization for non-FPU
- // cores.
- __ Branch(not_found);
- }
+ __ CheckMap(object,
+ scratch1,
+ Heap::kHeapNumberMapRootIndex,
+ not_found,
+ DONT_DO_SMI_CHECK);
+
+ STATIC_ASSERT(8 == kDoubleSize);
+ __ Addu(scratch1,
+ object,
+ Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+ __ lw(scratch2, MemOperand(scratch1, kPointerSize));
+ __ lw(scratch1, MemOperand(scratch1, 0));
+ __ Xor(scratch1, scratch1, Operand(scratch2));
+ __ And(scratch1, scratch1, Operand(mask));
+
+ // Calculate address of entry in string cache: each entry consists
+ // of two pointer sized fields.
+ __ sll(scratch1, scratch1, kPointerSizeLog2 + 1);
+ __ Addu(scratch1, number_string_cache, scratch1);
+
+ Register probe = mask;
+ __ lw(probe,
+ FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ JumpIfSmi(probe, not_found);
+ __ ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+ __ BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+ __ Branch(not_found);
}
__ bind(&is_smi);
@@ -1864,49 +1317,38 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// left hand side and a0, a1 represent right hand side.
Isolate* isolate = masm->isolate();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- Label nan;
- __ li(t0, Operand(LESS));
- __ li(t1, Operand(GREATER));
- __ li(t2, Operand(EQUAL));
-
- // Check if either rhs or lhs is NaN.
- __ BranchF(NULL, &nan, eq, f12, f14);
-
- // Check if LESS condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(OLT, D, f12, f14);
- __ Movt(v0, t0);
- // Use previous check to store conditionally to v0 oposite condition
- // (GREATER). If rhs is equal to lhs, this will be corrected in next
- // check.
- __ Movf(v0, t1);
- // Check if EQUAL condition is satisfied. If true, move conditionally
- // result to v0.
- __ c(EQ, D, f12, f14);
- __ Movt(v0, t2);
+ Label nan;
+ __ li(t0, Operand(LESS));
+ __ li(t1, Operand(GREATER));
+ __ li(t2, Operand(EQUAL));
+
+ // Check if either rhs or lhs is NaN.
+ __ BranchF(NULL, &nan, eq, f12, f14);
+
+ // Check if LESS condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(OLT, D, f12, f14);
+ __ Movt(v0, t0);
+  // Use previous check to store conditionally to v0 the opposite condition
+  // (GREATER). If rhs is equal to lhs, this will be corrected in the next
+ // check.
+ __ Movf(v0, t1);
+ // Check if EQUAL condition is satisfied. If true, move conditionally
+ // result to v0.
+ __ c(EQ, D, f12, f14);
+ __ Movt(v0, t2);
- __ Ret();
+ __ Ret();
- __ bind(&nan);
- // NaN comparisons always fail.
- // Load whatever we need in v0 to make the comparison fail.
- if (cc == lt || cc == le) {
- __ li(v0, Operand(GREATER));
- } else {
- __ li(v0, Operand(LESS));
- }
- __ Ret();
+ __ bind(&nan);
+ // NaN comparisons always fail.
+ // Load whatever we need in v0 to make the comparison fail.
+ if (cc == lt || cc == le) {
+ __ li(v0, Operand(GREATER));
} else {
- // Checks for NaN in the doubles we have loaded. Can return the answer or
- // fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc);
-
- // Compares two doubles that are not NaNs. Returns the answer.
- // Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ __ li(v0, Operand(LESS));
}
+ __ Ret();
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
@@ -1999,9 +1441,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// The stub expects its argument in the tos_ register and returns its result in
// it, too: zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
- // This stub uses FPU instructions.
- CpuFeatureScope scope(masm, FPU);
-
Label patch;
const Register map = t5.is(tos_) ? t3 : t5;
@@ -2115,7 +1554,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
// restore them.
__ MultiPush(kJSCallerSaved | ra.bit());
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
__ MultiPushFPU(kCallerSavedFPU);
}
const int argument_count = 1;
@@ -2129,7 +1567,6 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
ExternalReference::store_buffer_overflow_function(masm->isolate()),
argument_count);
if (save_doubles_ == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
__ MultiPopFPU(kCallerSavedFPU);
}
@@ -2360,19 +1797,11 @@ void UnaryOpStub::GenerateHeapNumberCodeBitNot(
__ mov(v0, a2); // Move newly allocated heap number to v0.
}
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(a1, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
- } else {
- // WriteInt32ToHeapNumberStub does not trigger GC, so we do not
- // have to set up a frame.
- WriteInt32ToHeapNumberStub stub(a1, v0, a2, a3);
- __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
- }
+ // Convert the int32 in a1 to the heap number in v0. a2 is corrupted.
+ __ mtc1(a1, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
__ bind(&impossible);
if (FLAG_debug_code) {
@@ -2434,7 +1863,7 @@ void UnaryOpStub::GenerateGenericCodeFallback(
void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
+ platform_specific_bit_ = true; // FPU is a base requirement for V8.
}
@@ -2661,9 +2090,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
case Token::DIV:
case Token::MOD: {
// Load left and right operands into f12 and f14 or a0/a1 and a2/a3
- // depending on whether FPU is available or not.
+  // depending on the operation.
FloatingPointHelper::Destination destination =
- CpuFeatures::IsSupported(FPU) &&
op != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
@@ -2688,20 +2116,16 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
masm, destination, right, f14, a2, a3, heap_number_map,
scratch1, scratch2, fail);
}
- // Use scratch3 as left in LoadNumber functions to avoid overwriting of
- // left (a0) register.
- __ mov(scratch3, left);
-
// Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
// jumps to |miss|.
if (left_type == BinaryOpIC::INT32) {
FloatingPointHelper::LoadNumberAsInt32Double(
- masm, scratch3, destination, f12, f16, a0, a1, heap_number_map,
+ masm, left, destination, f12, f16, a0, a1, heap_number_map,
scratch1, scratch2, f2, miss);
} else {
Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
FloatingPointHelper::LoadNumber(
- masm, destination, scratch3, f12, a0, a1, heap_number_map,
+ masm, destination, left, f12, a0, a1, heap_number_map,
scratch1, scratch2, fail);
}
}
@@ -2711,7 +2135,6 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Using FPU registers:
// f12: Left value.
// f14: Right value.
- CpuFeatureScope scope(masm, FPU);
switch (op) {
case Token::ADD:
__ add_d(f10, f12, f14);
@@ -2801,11 +2224,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// The code below for writing into heap numbers isn't capable of
// writing the register as an unsigned int so we go to slow case if we
// hit this case.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
- } else {
- __ Branch(not_numbers, lt, a2, Operand(zero_reg));
- }
+ __ Branch(&result_not_a_smi, lt, a2, Operand(zero_reg));
break;
case Token::SHL:
// Use only the 5 least significant bits of the shift count.
@@ -2839,28 +2258,19 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Nothing can go wrong now, so move the heap number to v0, which is the
// result.
__ mov(v0, t1);
-
- if (CpuFeatures::IsSupported(FPU)) {
- // Convert the int32 in a2 to the heap number in a0. As
- // mentioned above SHR needs to always produce a positive result.
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(a2, f0);
- if (op == Token::SHR) {
- __ Cvt_d_uw(f0, f0, f22);
- } else {
- __ cvt_d_w(f0, f0);
- }
- // ARM uses a workaround here because of the unaligned HeapNumber
- // kValueOffset. On MIPS this workaround is built into sdc1 so
- // there's no point in generating even more instructions.
- __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
+ // Convert the int32 in a2 to the heap number in a0. As
+  // mentioned above, SHR needs to always produce a positive result.
+ __ mtc1(a2, f0);
+ if (op == Token::SHR) {
+ __ Cvt_d_uw(f0, f0, f22);
} else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
+ __ cvt_d_w(f0, f0);
}
+ // ARM uses a workaround here because of the unaligned HeapNumber
+ // kValueOffset. On MIPS this workaround is built into sdc1 so
+ // there's no point in generating even more instructions.
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
break;
}
default:
@@ -3007,8 +2417,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
- FloatingPointHelper::Destination destination =
- (CpuFeatures::IsSupported(FPU) && op_ != Token::MOD)
+ FloatingPointHelper::Destination destination = (op_ != Token::MOD)
? FloatingPointHelper::kFPURegisters
: FloatingPointHelper::kCoreRegisters;
@@ -3038,7 +2447,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&transition);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
Label return_heap_number;
switch (op_) {
case Token::ADD:
@@ -3207,23 +2615,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// We only get a negative result if the shift value (a2) is 0.
// This result cannot be represented as a signed 32-bit integer, try
// to return a heap number if we can.
- // The non FPU code does not support this special case, so jump to
- // runtime if we don't support it.
- if (CpuFeatures::IsSupported(FPU)) {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &return_heap_number,
- lt,
- a2,
- Operand(zero_reg));
- } else {
- __ Branch((result_type_ <= BinaryOpIC::INT32)
- ? &transition
- : &call_runtime,
- lt,
- a2,
- Operand(zero_reg));
- }
+ __ Branch((result_type_ <= BinaryOpIC::INT32)
+ ? &transition
+ : &return_heap_number,
+ lt,
+ a2,
+ Operand(zero_reg));
break;
case Token::SHL:
__ And(a2, a2, Operand(0x1f));
@@ -3251,31 +2648,21 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
&call_runtime,
mode_);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- if (op_ != Token::SHR) {
- // Convert the result to a floating point value.
- __ mtc1(a2, double_scratch);
- __ cvt_d_w(double_scratch, double_scratch);
- } else {
- // The result must be interpreted as an unsigned 32-bit integer.
- __ mtc1(a2, double_scratch);
- __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
- }
-
- // Store the result.
- __ mov(v0, heap_number_result);
- __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Ret();
+ if (op_ != Token::SHR) {
+ // Convert the result to a floating point value.
+ __ mtc1(a2, double_scratch);
+ __ cvt_d_w(double_scratch, double_scratch);
} else {
- // Tail call that writes the int32 in a2 to the heap number in v0, using
- // a3 and a0 as scratch. v0 is preserved and returned.
- __ mov(v0, t1);
- WriteInt32ToHeapNumberStub stub(a2, v0, a3, a0);
- __ TailCallStub(&stub);
+ // The result must be interpreted as an unsigned 32-bit integer.
+ __ mtc1(a2, double_scratch);
+ __ Cvt_d_uw(double_scratch, double_scratch, single_scratch);
}
+ // Store the result.
+ __ mov(v0, heap_number_result);
+ __ sdc1(double_scratch, FieldMemOperand(v0, HeapNumber::kValueOffset));
+ __ Ret();
+
break;
}
@@ -3455,107 +2842,102 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
const Register cache_entry = a0;
const bool tagged = (argument_type_ == TAGGED);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- if (tagged) {
- // Argument is a number and is on stack and in a0.
- // Load argument and check if it is a smi.
- __ JumpIfNotSmi(a0, &input_not_smi);
-
- // Input is a smi. Convert to double and load the low and high words
- // of the double into a2, a3.
- __ sra(t0, a0, kSmiTagSize);
- __ mtc1(t0, f4);
- __ cvt_d_w(f4, f4);
- __ Move(a2, a3, f4);
- __ Branch(&loaded);
-
- __ bind(&input_not_smi);
- // Check if input is a HeapNumber.
- __ CheckMap(a0,
- a1,
- Heap::kHeapNumberMapRootIndex,
- &calculate,
- DONT_DO_SMI_CHECK);
- // Input is a HeapNumber. Store the
- // low and high words into a2, a3.
- __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
- __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
- } else {
- // Input is untagged double in f4. Output goes to f4.
- __ Move(a2, a3, f4);
- }
- __ bind(&loaded);
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // Compute hash (the shifts are arithmetic):
- // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
- __ Xor(a1, a2, a3);
- __ sra(t0, a1, 16);
- __ Xor(a1, a1, t0);
- __ sra(t0, a1, 8);
- __ Xor(a1, a1, t0);
- ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
- __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
-
- // a2 = low 32 bits of double value.
- // a3 = high 32 bits of double value.
- // a1 = TranscendentalCache::hash(double value).
- __ li(cache_entry, Operand(
- ExternalReference::transcendental_cache_array_address(
- masm->isolate())));
- // a0 points to cache array.
- __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
- Isolate::Current()->transcendental_cache()->caches_[0])));
- // a0 points to the cache for the type type_.
- // If NULL, the cache hasn't been initialized yet, so go through runtime.
- __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
+ if (tagged) {
+ // Argument is a number and is on stack and in a0.
+ // Load argument and check if it is a smi.
+ __ JumpIfNotSmi(a0, &input_not_smi);
+
+ // Input is a smi. Convert to double and load the low and high words
+ // of the double into a2, a3.
+ __ sra(t0, a0, kSmiTagSize);
+ __ mtc1(t0, f4);
+ __ cvt_d_w(f4, f4);
+ __ Move(a2, a3, f4);
+ __ Branch(&loaded);
+
+ __ bind(&input_not_smi);
+ // Check if input is a HeapNumber.
+ __ CheckMap(a0,
+ a1,
+ Heap::kHeapNumberMapRootIndex,
+ &calculate,
+ DONT_DO_SMI_CHECK);
+ // Input is a HeapNumber. Store the
+ // low and high words into a2, a3.
+ __ lw(a2, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ lw(a3, FieldMemOperand(a0, HeapNumber::kValueOffset + 4));
+ } else {
+ // Input is untagged double in f4. Output goes to f4.
+ __ Move(a2, a3, f4);
+ }
+ __ bind(&loaded);
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // Compute hash (the shifts are arithmetic):
+ // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1);
+ __ Xor(a1, a2, a3);
+ __ sra(t0, a1, 16);
+ __ Xor(a1, a1, t0);
+ __ sra(t0, a1, 8);
+ __ Xor(a1, a1, t0);
+ ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize));
+ __ And(a1, a1, Operand(TranscendentalCache::SubCache::kCacheSize - 1));
+
+ // a2 = low 32 bits of double value.
+ // a3 = high 32 bits of double value.
+ // a1 = TranscendentalCache::hash(double value).
+ __ li(cache_entry, Operand(
+ ExternalReference::transcendental_cache_array_address(
+ masm->isolate())));
+ // a0 points to cache array.
+ __ lw(cache_entry, MemOperand(cache_entry, type_ * sizeof(
+ Isolate::Current()->transcendental_cache()->caches_[0])));
+ // a0 points to the cache for the type type_.
+ // If NULL, the cache hasn't been initialized yet, so go through runtime.
+ __ Branch(&invalid_cache, eq, cache_entry, Operand(zero_reg));
#ifdef DEBUG
- // Check that the layout of cache elements match expectations.
- { TranscendentalCache::SubCache::Element test_elem[2];
- char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
- char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
- char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
- char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
- char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
- CHECK_EQ(12, elem2_start - elem_start); // Two uint_32's and a pointer.
- CHECK_EQ(0, elem_in0 - elem_start);
- CHECK_EQ(kIntSize, elem_in1 - elem_start);
- CHECK_EQ(2 * kIntSize, elem_out - elem_start);
- }
+  // Check that the layout of cache elements matches expectations.
+ { TranscendentalCache::SubCache::Element test_elem[2];
+ char* elem_start = reinterpret_cast<char*>(&test_elem[0]);
+ char* elem2_start = reinterpret_cast<char*>(&test_elem[1]);
+ char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0]));
+ char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1]));
+ char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output));
+    CHECK_EQ(12, elem2_start - elem_start);  // Two uint32_t's and a pointer.
+ CHECK_EQ(0, elem_in0 - elem_start);
+ CHECK_EQ(kIntSize, elem_in1 - elem_start);
+ CHECK_EQ(2 * kIntSize, elem_out - elem_start);
+ }
#endif
- // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
- __ sll(t0, a1, 1);
- __ Addu(a1, a1, t0);
- __ sll(t0, a1, 2);
- __ Addu(cache_entry, cache_entry, t0);
-
- // Check if cache matches: Double value is stored in uint32_t[2] array.
- __ lw(t0, MemOperand(cache_entry, 0));
- __ lw(t1, MemOperand(cache_entry, 4));
- __ lw(t2, MemOperand(cache_entry, 8));
- __ Branch(&calculate, ne, a2, Operand(t0));
- __ Branch(&calculate, ne, a3, Operand(t1));
- // Cache hit. Load result, cleanup and return.
- Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(
- counters->transcendental_cache_hit(), 1, scratch0, scratch1);
- if (tagged) {
- // Pop input value from stack and load result into v0.
- __ Drop(1);
- __ mov(v0, t2);
- } else {
- // Load result into f4.
- __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
- }
- __ Ret();
- } // if (CpuFeatures::IsSupported(FPU))
+ // Find the address of the a1'st entry in the cache, i.e., &a0[a1*12].
+ __ sll(t0, a1, 1);
+ __ Addu(a1, a1, t0);
+ __ sll(t0, a1, 2);
+ __ Addu(cache_entry, cache_entry, t0);
+
+ // Check if cache matches: Double value is stored in uint32_t[2] array.
+ __ lw(t0, MemOperand(cache_entry, 0));
+ __ lw(t1, MemOperand(cache_entry, 4));
+ __ lw(t2, MemOperand(cache_entry, 8));
+ __ Branch(&calculate, ne, a2, Operand(t0));
+ __ Branch(&calculate, ne, a3, Operand(t1));
+ // Cache hit. Load result, cleanup and return.
+ Counters* counters = masm->isolate()->counters();
+ __ IncrementCounter(
+ counters->transcendental_cache_hit(), 1, scratch0, scratch1);
+ if (tagged) {
+ // Pop input value from stack and load result into v0.
+ __ Drop(1);
+ __ mov(v0, t2);
+ } else {
+ // Load result into f4.
+ __ ldc1(f4, FieldMemOperand(t2, HeapNumber::kValueOffset));
+ }
+ __ Ret();
__ bind(&calculate);
- Counters* counters = masm->isolate()->counters();
__ IncrementCounter(
counters->transcendental_cache_miss(), 1, scratch0, scratch1);
if (tagged) {
@@ -3565,9 +2947,6 @@ void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
1,
1);
} else {
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatureScope scope(masm, FPU);
-
Label no_update;
Label skip_cache;
@@ -3694,7 +3073,6 @@ void InterruptStub::Generate(MacroAssembler* masm) {
void MathPowStub::Generate(MacroAssembler* masm) {
- CpuFeatureScope fpu_scope(masm, FPU);
const Register base = a1;
const Register exponent = a2;
const Register heapnumbermap = t1;
@@ -3924,14 +3302,13 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
void CodeStub::GenerateFPStubs(Isolate* isolate) {
- SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
+ SaveFPRegsMode mode = kSaveFPRegs;
CEntryStub save_doubles(1, mode);
StoreBufferOverflowStub stub(mode);
// These stubs might already be in the snapshot, detect that and don't
@@ -3940,11 +3317,13 @@ void CodeStub::GenerateFPStubs(Isolate* isolate) {
Code* save_doubles_code;
if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) {
save_doubles_code = *save_doubles.GetCode(isolate);
- save_doubles_code->set_is_pregenerated(true);
-
- Code* store_buffer_overflow_code = *stub.GetCode(isolate);
- store_buffer_overflow_code->set_is_pregenerated(true);
}
+ Code* store_buffer_overflow_code;
+ if (!stub.FindCodeInCache(&store_buffer_overflow_code, isolate)) {
+ store_buffer_overflow_code = *stub.GetCode(isolate);
+ }
+ save_doubles_code->set_is_pregenerated(true);
+ store_buffer_overflow_code->set_is_pregenerated(true);
isolate->set_fp_stubs_generated(true);
}
@@ -4075,11 +3454,18 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
- __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
+ // Retrieve the pending exception.
__ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
__ lw(v0, MemOperand(t0));
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, v0, t0, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ __ li(a3, Operand(isolate->factory()->the_hole_value()));
+ __ li(t0, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+ isolate)));
__ sw(a3, MemOperand(t0));
// Special handling of termination exceptions which are uncatchable
@@ -4200,20 +3586,15 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Save callee saved registers on the stack.
__ MultiPush(kCalleeSaved | ra.bit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Save callee-saved FPU registers.
- __ MultiPushFPU(kCalleeSavedFPU);
- // Set up the reserved register for 0.0.
- __ Move(kDoubleRegZero, 0.0);
- }
+ // Save callee-saved FPU registers.
+ __ MultiPushFPU(kCalleeSavedFPU);
+ // Set up the reserved register for 0.0.
+ __ Move(kDoubleRegZero, 0.0);
// Load argv in s0 register.
int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
- if (CpuFeatures::IsSupported(FPU)) {
- offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
- }
+ offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
__ InitializeRootRegister();
__ lw(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
@@ -4349,11 +3730,8 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Reset the stack to the callee saved registers.
__ addiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- // Restore callee-saved fpu registers.
- __ MultiPopFPU(kCalleeSavedFPU);
- }
+ // Restore callee-saved fpu registers.
+ __ MultiPopFPU(kCalleeSavedFPU);
// Restore callee saved registers from the stack.
__ MultiPop(kCalleeSaved | ra.bit());
@@ -7092,59 +6470,55 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
}
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or FPU is unsupported.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(a0, &right_smi);
- __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&left);
- __ bind(&right_smi);
- __ SmiUntag(a2, a0); // Can't clobber a0 yet.
- FPURegister single_scratch = f6;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f2, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(a1, &left_smi);
- __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&done);
- __ bind(&left_smi);
- __ SmiUntag(a2, a1); // Can't clobber a1 yet.
- single_scratch = f8;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f0, single_scratch);
+ // stub if NaN is involved.
+ // Load left and right operand.
+ Label done, left, left_smi, right_smi;
+ __ JumpIfSmi(a0, &right_smi);
+ __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&left);
+ __ bind(&right_smi);
+ __ SmiUntag(a2, a0); // Can't clobber a0 yet.
+ FPURegister single_scratch = f6;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f2, single_scratch);
- __ bind(&done);
+ __ bind(&left);
+ __ JumpIfSmi(a1, &left_smi);
+ __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+ DONT_DO_SMI_CHECK);
+ __ Subu(a2, a1, Operand(kHeapObjectTag));
+ __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Branch(&done);
+ __ bind(&left_smi);
+ __ SmiUntag(a2, a1); // Can't clobber a1 yet.
+ single_scratch = f8;
+ __ mtc1(a2, single_scratch);
+ __ cvt_d_w(f0, single_scratch);
- // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
- Label fpu_eq, fpu_lt;
- // Test if equal, and also handle the unordered/NaN case.
- __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+ __ bind(&done);
- // Test if less (unordered case is already handled).
- __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+ // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+ Label fpu_eq, fpu_lt;
+ // Test if equal, and also handle the unordered/NaN case.
+ __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
- // Otherwise it's greater, so just fall thru, and return.
- __ li(v0, Operand(GREATER));
- __ Ret();
+ // Test if less (unordered case is already handled).
+ __ BranchF(&fpu_lt, NULL, lt, f0, f2);
- __ bind(&fpu_eq);
- __ li(v0, Operand(EQUAL));
- __ Ret();
+ // Otherwise it's greater, so just fall thru, and return.
+ __ li(v0, Operand(GREATER));
+ __ Ret();
- __ bind(&fpu_lt);
- __ li(v0, Operand(LESS));
- __ Ret();
- }
+ __ bind(&fpu_eq);
+ __ li(v0, Operand(EQUAL));
+ __ Ret();
+
+ __ bind(&fpu_lt);
+ __ li(v0, Operand(LESS));
+ __ Ret();
__ bind(&unordered);
__ bind(&generic_stub);
@@ -7785,11 +7159,6 @@ bool RecordWriteStub::IsPregenerated() {
}
-bool StoreBufferOverflowStub::IsPregenerated() {
- return save_doubles_ == kDontSaveFPRegs || ISOLATE->fp_stubs_generated();
-}
-
-
void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
Isolate* isolate) {
StoreBufferOverflowStub stub1(kDontSaveFPRegs);
@@ -7812,7 +7181,7 @@ void RecordWriteStub::GenerateFixedRegStubsAheadOfTime(Isolate* isolate) {
bool CodeStub::CanUseFPRegisters() {
- return CpuFeatures::IsSupported(FPU);
+ return true; // FPU is a base requirement for V8.
}
@@ -8080,9 +7449,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- bool save_fp_regs = CpuFeatures::IsSupported(FPU);
- CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
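The ICCompareStub number path earlier in this file's diff now assumes the FPU is present: both operands (smi or heap number) are loaded into f0/f2 and a three-way result is produced directly, with NaN falling through to the generic stub. A minimal C++ sketch of that contract (illustrative only, not code from this patch; LESS/EQUAL/GREATER follow V8's -1/0/1 convention):

#include <cmath>

// The left operand ends up in f0 and the right operand in f2 in the stub
// above; a NaN on either side is the "unordered" case handled elsewhere.
int CompareNumbers(double left, double right, bool* unordered) {
  *unordered = std::isnan(left) || std::isnan(right);
  if (*unordered) return 0;        // caller branches to the generic stub
  if (left == right) return 0;     // EQUAL
  return left < right ? -1 : 1;    // LESS : GREATER
}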
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index 225accc51..0ebfe5995 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -62,11 +62,11 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
- : save_doubles_(save_fp) { }
+ : save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
- virtual bool IsPregenerated();
+ virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@@ -484,7 +484,6 @@ class RecordWriteStub: public PlatformCodeStub {
void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
masm->MultiPushFPU(kCallerSavedFPU);
}
}
@@ -492,7 +491,6 @@ class RecordWriteStub: public PlatformCodeStub {
inline void RestoreCallerSaveRegisters(MacroAssembler*masm,
SaveFPRegsMode mode) {
if (mode == kSaveFPRegs) {
- CpuFeatureScope scope(masm, FPU);
masm->MultiPopFPU(kCallerSavedFPU);
}
masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
@@ -683,27 +681,6 @@ class FloatingPointHelper : public AllStatic {
FPURegister double_scratch1,
Label* not_int32);
- // Generate non FPU code to check if a double can be exactly represented by a
- // 32-bit integer. This does not check for 0 or -0, which need
- // to be checked for separately.
- // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
- // through otherwise.
-  // src1 and src2 will be clobbered.
- //
- // Expected input:
- // - src1: higher (exponent) part of the double value.
- // - src2: lower (mantissa) part of the double value.
- // Output status:
- // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
- // - src2: contains 1.
- // - other registers are clobbered.
- static void DoubleIs32BitInteger(MacroAssembler* masm,
- Register src1,
- Register src2,
- Register dst,
- Register scratch,
- Label* not_int32);
-
// Generates code to call a C function to do a double operation using core
// registers. (Used when FPU is not supported.)
// This code never falls through, but returns with a heap number containing
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index bd403cee6..e874a0872 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(FPU)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
- CpuFeatureScope use_fpu(&masm, FPU);
DoubleRegister input = f12;
DoubleRegister result = f0;
DoubleRegister double_scratch1 = f4;
@@ -94,7 +92,7 @@ UnaryMathFunction CreateExpFunction() {
if (!IsMipsSoftFloatABI) {
// Result is already in f0, nothing to do.
} else {
- __ Move(a0, a1, result);
+ __ Move(v0, v1, result);
}
__ Ret();
}
@@ -184,7 +182,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- t0 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
- bool fpu_supported = CpuFeatures::IsSupported(FPU);
Register scratch = t6;
@@ -249,8 +246,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// t2: end of destination FixedDoubleArray, not tagged
// t3: begin of FixedDoubleArray element fields, not tagged
- if (!fpu_supported) __ Push(a1, a0);
-
__ Branch(&entry);
__ bind(&only_change_map);
@@ -278,25 +273,11 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
// Normal smi, convert to double and store.
- if (fpu_supported) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(t5, f0);
- __ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(t3));
- __ Addu(t3, t3, kDoubleSize);
- } else {
- FloatingPointHelper::ConvertIntToDouble(masm,
- t5,
- FloatingPointHelper::kCoreRegisters,
- f0,
- a0,
- a1,
- t7,
- f0);
- __ sw(a0, MemOperand(t3)); // mantissa
- __ sw(a1, MemOperand(t3, kIntSize)); // exponent
- __ Addu(t3, t3, kDoubleSize);
- }
+ __ mtc1(t5, f0);
+ __ cvt_d_w(f0, f0);
+ __ sdc1(f0, MemOperand(t3));
+ __ Addu(t3, t3, kDoubleSize);
+
__ Branch(&entry);
// Hole found, store the-hole NaN.
@@ -315,7 +296,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ bind(&entry);
__ Branch(&loop, lt, t3, Operand(t2));
- if (!fpu_supported) __ Pop(a1, a0);
__ pop(ra);
__ bind(&done);
}
diff --git a/deps/v8/src/mips/constants-mips.h b/deps/v8/src/mips/constants-mips.h
index 139e7db03..5a0870fd2 100644
--- a/deps/v8/src/mips/constants-mips.h
+++ b/deps/v8/src/mips/constants-mips.h
@@ -61,8 +61,9 @@ enum ArchVariants {
// -mhard-float is passed to the compiler.
const bool IsMipsSoftFloatABI = false;
#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
-// Not using floating-point coprocessor instructions. This flag is raised when
-// -msoft-float is passed to the compiler.
+// This flag is raised when -msoft-float is passed to the compiler.
+// Although the FPU is a base requirement for V8, the soft-float ABI is
+// still used on soft-float systems that rely on kernel FPU emulation.
const bool IsMipsSoftFloatABI = true;
#else
const bool IsMipsSoftFloatABI = true;
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index 7158e4f55..7896f2013 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -116,76 +116,93 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
}
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
+// This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
+// The back edge bookkeeping code matches the pattern:
+//
+// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
+// beq at, zero_reg, ok
+// lui t9, <interrupt stub address> upper
+// ori t9, <interrupt stub address> lower
+// jalr t9
+// nop
+// ok-label ----- pc_after points here
+//
+// We patch the code to the following form:
+//
+// addiu at, zero_reg, 1
+// beq at, zero_reg, ok ;; Not changed
+// lui t9, <on-stack replacement address> upper
+// ori t9, <on-stack replacement address> lower
+// jalr t9 ;; Not changed
+// nop ;; Not changed
+// ok-label ----- pc_after points here
+
+void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
- Code* check_code,
+ Code* interrupt_code,
Code* replacement_code) {
- const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
- // The call of the stack guard check has the following form:
- // sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
- // beq at, zero_reg, ok
- // lui t9, <stack guard address> upper
- // ori t9, <stack guard address> lower
- // jalr t9
- // nop
- // ----- pc_after points here
-
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
+ ASSERT(!InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ static const int kInstrSize = Assembler::kInstrSize;
// Replace the sltu instruction with load-imm 1 to at, so beq is not taken.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->addiu(at, zero_reg, 1);
-
// Replace the stack check address in the load-immediate (lui/ori pair)
// with the entry address of the replacement code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(check_code->entry()));
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
replacement_code->entry());
- // We patched the code to the following form:
- // addiu at, zero_reg, 1
- // beq at, zero_reg, ok ;; Not changed
- // lui t9, <on-stack replacement address> upper
- // ori t9, <on-stack replacement address> lower
- // jalr t9 ;; Not changed
- // nop ;; Not changed
- // ----- pc_after points here
-
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 4 * kInstrSize, replacement_code);
}
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
- // Exact opposite of the function above.
- const int kInstrSize = Assembler::kInstrSize;
- ASSERT(Assembler::IsAddImmediate(
- Assembler::instr_at(pc_after - 6 * kInstrSize)));
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
-
+void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ static const int kInstrSize = Assembler::kInstrSize;
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
patcher.masm()->slt(at, a3, zero_reg);
-
- // Replace the on-stack replacement address in the load-immediate (lui/ori
- // pair) with the entry address of the normal stack-check code.
- ASSERT(reinterpret_cast<uint32_t>(
- Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
- reinterpret_cast<uint32_t>(replacement_code->entry()));
+ // Restore the original call address.
Assembler::set_target_address_at(pc_after - 4 * kInstrSize,
- check_code->entry());
+ interrupt_code->entry());
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_after - 4 * kInstrSize, check_code);
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, pc_after - 4 * kInstrSize, interrupt_code);
}
+#ifdef DEBUG
+bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ static const int kInstrSize = Assembler::kInstrSize;
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc_after - 5 * kInstrSize)));
+ if (Assembler::IsAddImmediate(
+ Assembler::instr_at(pc_after - 6 * kInstrSize))) {
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(replacement_code->entry()));
+ return true;
+ } else {
+ ASSERT(reinterpret_cast<uint32_t>(
+ Assembler::target_address_at(pc_after - 4 * kInstrSize)) ==
+ reinterpret_cast<uint32_t>(interrupt_code->entry()));
+ return false;
+ }
+}
+#endif // DEBUG
+
+
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
ByteArray* translations = data->TranslationByteArray();
int length = data->DeoptCount();
@@ -586,17 +603,12 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- // Save all FPU registers before messing with them.
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int offset = i * kDoubleSize;
- __ sdc1(fpu_reg, MemOperand(sp, offset));
- }
- } else {
- __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ // Save all FPU registers before messing with them.
+ __ Subu(sp, sp, Operand(kDoubleRegsSize));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int offset = i * kDoubleSize;
+ __ sdc1(fpu_reg, MemOperand(sp, offset));
}
// Push saved_regs (needed to populate FrameDescription::registers_).
@@ -669,16 +681,13 @@ void Deoptimizer::EntryGenerator::Generate() {
}
int double_regs_offset = FrameDescription::double_registers_offset();
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- // Copy FPU registers to
- // double_registers_[DoubleRegister::kNumAllocatableRegisters]
- for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
- int dst_offset = i * kDoubleSize + double_regs_offset;
- int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
- __ ldc1(f0, MemOperand(sp, src_offset));
- __ sdc1(f0, MemOperand(a1, dst_offset));
- }
+ // Copy FPU registers to
+ // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+ for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+ int dst_offset = i * kDoubleSize + double_regs_offset;
+ int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+ __ ldc1(f0, MemOperand(sp, src_offset));
+ __ sdc1(f0, MemOperand(a1, dst_offset));
}
// Remove the bailout id, eventually return address, and the saved registers
@@ -747,15 +756,11 @@ void Deoptimizer::EntryGenerator::Generate() {
__ bind(&outer_loop_header);
__ Branch(&outer_push_loop, lt, t0, Operand(a1));
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
-
- __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
- for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
- const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
- int src_offset = i * kDoubleSize + double_regs_offset;
- __ ldc1(fpu_reg, MemOperand(a1, src_offset));
- }
+ __ lw(a1, MemOperand(a0, Deoptimizer::input_offset()));
+ for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+ const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+ int src_offset = i * kDoubleSize + double_regs_offset;
+ __ ldc1(fpu_reg, MemOperand(a1, src_offset));
}
// Push state, pc, and continuation from the last output frame.
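The PatchInterruptCodeAt/RevertInterruptCodeAt pair above toggles a single instruction at each back edge and swaps the lui/ori call target between the interrupt stub and the on-stack-replacement entry. A rough C++ model of that protocol follows (illustrative only; the names below are not from V8):

#include <cstdint>

// kSltu stands for the original "sltu/slt at, ..." check, kAddiu for the
// patched "addiu at, zero_reg, 1" that forces the beq to fall through.
enum BackEdgeInstr { kSltu, kAddiu };

struct BackEdgeSite {
  BackEdgeInstr check_instr;  // instruction rewritten by the CodePatcher
  uintptr_t call_target;      // address held in the lui/ori pair
};

// Model of PatchInterruptCodeAt: the check always "fails", so every pass
// over the back edge calls the on-stack replacement entry.
void Patch(BackEdgeSite* site, uintptr_t osr_entry) {
  site->check_instr = kAddiu;
  site->call_target = osr_entry;
}

// Model of RevertInterruptCodeAt: restore the interrupt/stack check.
void Revert(BackEdgeSite* site, uintptr_t interrupt_entry) {
  site->check_instr = kSltu;
  site->call_target = interrupt_entry;
}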
diff --git a/deps/v8/src/mips/disasm-mips.cc b/deps/v8/src/mips/disasm-mips.cc
index 0eca71f2b..b787f13fe 100644
--- a/deps/v8/src/mips/disasm-mips.cc
+++ b/deps/v8/src/mips/disasm-mips.cc
@@ -1051,8 +1051,8 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
- fprintf(f, "%p %08x %s\n",
- prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+ v8::internal::PrintF(f, "%p %08x %s\n",
+ prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}
diff --git a/deps/v8/src/mips/frames-mips.h b/deps/v8/src/mips/frames-mips.h
index 1568ce66e..f6f20cd20 100644
--- a/deps/v8/src/mips/frames-mips.h
+++ b/deps/v8/src/mips/frames-mips.h
@@ -152,18 +152,6 @@ const int kSafepointRegisterStackIndexMap[kNumRegs] = {
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 3e7c8da7a..8e2d5abbe 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -170,8 +170,6 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- int locals_count = info->scope()->num_stack_slots();
-
info->set_prologue_offset(masm_->pc_offset());
// The following three instructions must remain together and unmodified for
// code aging to work properly.
@@ -183,6 +181,9 @@ void FullCodeGenerator::Generate() {
__ Addu(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
+ int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
for (int i = 0; i < locals_count; i++) {
__ push(at);
}
@@ -673,17 +674,9 @@ void FullCodeGenerator::DoTest(Expression* condition,
Label* if_true,
Label* if_false,
Label* fall_through) {
- if (CpuFeatures::IsSupported(FPU)) {
- ToBooleanStub stub(result_register());
- __ CallStub(&stub, condition->test_id());
- __ mov(at, zero_reg);
- } else {
- // Call the runtime to find the boolean value of the source and then
- // translate it into control flow to the pair of labels.
- __ push(result_register());
- __ CallRuntime(Runtime::kToBool, 1);
- __ LoadRoot(at, Heap::kFalseValueRootIndex);
- }
+ ToBooleanStub stub(result_register());
+ __ CallStub(&stub, condition->test_id());
+ __ mov(at, zero_reg);
Split(ne, v0, Operand(at), if_true, if_false, fall_through);
}
@@ -1276,7 +1269,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ li(a0, Operand(info));
__ push(a0);
__ CallStub(&stub);
@@ -1737,7 +1730,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
__ li(a2, Operand(Smi::FromInt(expr->literal_index())));
__ li(a1, Operand(constant_elements));
- __ Push(a3, a2, a1);
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
@@ -1748,8 +1740,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(),
1, a1, a2);
} else if (expr->depth() > 1) {
+ __ Push(a3, a2, a1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(a3, a2, a1);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@@ -3042,31 +3037,21 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in v0 to 0.(32 random bits) in a double
// by computing:
  // ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20).
- if (CpuFeatures::IsSupported(FPU)) {
- __ PrepareCallCFunction(1, a0);
- __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
- __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
-
- CpuFeatureScope scope(masm(), FPU);
- // 0x41300000 is the top half of 1.0 x 2^20 as a double.
- __ li(a1, Operand(0x41300000));
- // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
- __ Move(f12, v0, a1);
- // Move 0x4130000000000000 to FPU.
- __ Move(f14, zero_reg, a1);
- // Subtract and store the result in the heap number.
- __ sub_d(f0, f12, f14);
- __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
- __ mov(v0, s0);
- } else {
- __ PrepareCallCFunction(2, a0);
- __ mov(a0, s0);
- __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
- __ CallCFunction(
- ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
- }
+ __ PrepareCallCFunction(1, a0);
+ __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ lw(a0, FieldMemOperand(a0, GlobalObject::kNativeContextOffset));
+ __ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
+
+ // 0x41300000 is the top half of 1.0 x 2^20 as a double.
+ __ li(a1, Operand(0x41300000));
+ // Move 0x41300000xxxxxxxx (x = random bits in v0) to FPU.
+ __ Move(f12, v0, a1);
+ // Move 0x4130000000000000 to FPU.
+ __ Move(f14, zero_reg, a1);
+ // Subtract and store the result in the heap number.
+ __ sub_d(f0, f12, f14);
+ __ sdc1(f0, FieldMemOperand(s0, HeapNumber::kValueOffset));
+ __ mov(v0, s0);
context()->Plug(v0);
}
@@ -3204,12 +3189,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
- if (CpuFeatures::IsSupported(FPU)) {
- MathPowStub stub(MathPowStub::ON_STACK);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kMath_pow, 2);
- }
+ MathPowStub stub(MathPowStub::ON_STACK);
+ __ CallStub(&stub);
context()->Plug(v0);
}
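The EmitRandomHeapNumber code above (now unconditionally FPU-based) relies on an IEEE 754 bit trick: writing 32 random bits into the low mantissa word of 1.0 x 2^20 and subtracting 2^20 leaves a uniform value in [0, 1). A short, self-contained C++ illustration of the same trick (assumes a little-endian host; not part of this patch):

#include <cstdint>
#include <cstring>

// Build 0x41300000'xxxxxxxx, i.e. 1.(20 zero bits)(32 random bits) * 2^20,
// then subtract 1.0 * 2^20 so only random_bits * 2^-32 remains, in [0, 1).
double RandomBitsToUnitInterval(uint32_t random_bits) {
  uint64_t bits = (UINT64_C(0x41300000) << 32) | random_bits;
  double d;
  std::memcpy(&d, &bits, sizeof(d));
  return d - 1048576.0;  // 1048576.0 == 2^20
}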
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index b5d6c451f..59cfcd900 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -65,7 +65,6 @@ bool LCodeGen::GenerateCode() {
HPhase phase("Z_Code generation", chunk());
ASSERT(is_unused());
status_ = GENERATING;
- CpuFeatureScope scope(masm(), FPU);
// Open a frame scope to indicate that there is a frame on the stack. The
// NONE indicates that the scope shouldn't actually generate code to set up
@@ -114,7 +113,7 @@ void LCodeGen::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
@@ -193,8 +192,7 @@ bool LCodeGen::GeneratePrologue() {
}
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
+ if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
@@ -829,7 +827,9 @@ void LCodeGen::DeoptimizeIf(Condition cc,
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on MIPS.
- if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
+ if (FLAG_deopt_every_n_times == 1 &&
+ !info()->IsStub() &&
+ info()->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@@ -1475,7 +1475,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DoubleRegister result = ToDoubleRegister(instr->result());
- CpuFeatureScope scope(masm(), FPU);
double v = instr->value();
__ Move(result, v);
}
@@ -1666,7 +1665,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ bind(&done);
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
- CpuFeatureScope scope(masm(), FPU);
FPURegister left_reg = ToDoubleRegister(left);
FPURegister right_reg = ToDoubleRegister(right);
FPURegister result_reg = ToDoubleRegister(instr->result());
@@ -1707,7 +1705,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister left = ToDoubleRegister(instr->left());
DoubleRegister right = ToDoubleRegister(instr->right());
DoubleRegister result = ToDoubleRegister(instr->result());
@@ -1817,7 +1814,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
Register reg = ToRegister(instr->value());
EmitBranch(true_block, false_block, ne, reg, Operand(zero_reg));
} else if (r.IsDouble()) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister reg = ToDoubleRegister(instr->value());
// Test the double value. Zero and NaN are false.
EmitBranchF(true_block, false_block, nue, reg, kDoubleRegZero);
@@ -1902,7 +1898,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
- CpuFeatureScope scope(masm(), FPU);
// heap number -> false iff +0, -0, or NaN.
DoubleRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@@ -1982,7 +1977,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
- CpuFeatureScope scope(masm(), FPU);
// Compare left and right as doubles and load the
// resulting flags into the normal status register.
FPURegister left_reg = ToDoubleRegister(left);
@@ -2545,8 +2539,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(v0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
- if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
+ if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@@ -2933,61 +2926,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ sll(scratch0(), key, shift_size);
__ Addu(scratch0(), scratch0(), external_pointer);
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Register value = external_pointer;
- __ lw(value, MemOperand(scratch0(), additional_offset));
- __ And(sfpd_lo, value, Operand(kBinary32MantissaMask));
-
- __ srl(scratch0(), value, kBinary32MantissaBits);
- __ And(scratch0(), scratch0(),
- Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
- Label exponent_rebiased;
- __ Xor(at, scratch0(), Operand(0x00));
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- __ Xor(at, scratch0(), Operand(0xff));
- Label skip;
- __ Branch(&skip, ne, at, Operand(zero_reg));
- __ li(scratch0(), Operand(0x7ff));
- __ bind(&skip);
- __ Branch(&exponent_rebiased, eq, at, Operand(zero_reg));
-
- // Rebias exponent.
- __ Addu(scratch0(),
- scratch0(),
- Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
- __ bind(&exponent_rebiased);
- __ And(sfpd_hi, value, Operand(kBinary32SignMask));
- __ sll(at, scratch0(), HeapNumber::kMantissaBitsInTopWord);
- __ Or(sfpd_hi, sfpd_hi, at);
-
- // Shift mantissa.
- static const int kMantissaShiftForHiWord =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaShiftForLoWord =
- kBitsPerInt - kMantissaShiftForHiWord;
-
- __ srl(at, sfpd_lo, kMantissaShiftForHiWord);
- __ Or(sfpd_hi, sfpd_hi, at);
- __ sll(sfpd_lo, sfpd_lo, kMantissaShiftForLoWord);
-
- } else {
- __ lw(sfpd_lo, MemOperand(scratch0(), additional_offset));
- __ lw(sfpd_hi, MemOperand(scratch0(),
- additional_offset + kPointerSize));
- }
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
}
} else {
Register result = ToRegister(instr->result());
@@ -3062,21 +3005,11 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ sll(scratch, key, shift_size);
__ Addu(elements, elements, scratch);
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ Addu(elements, elements, Operand(base_offset));
- __ ldc1(result, MemOperand(elements));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
- DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
- }
- } else {
- __ lw(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
- __ lw(sfpd_lo, MemOperand(elements, base_offset));
- if (instr->hydrogen()->RequiresHoleCheck()) {
- ASSERT(kPointerSize == sizeof(kHoleNanLower32));
- DeoptimizeIf(eq, instr->environment(), sfpd_hi, Operand(kHoleNanUpper32));
- }
+ __ Addu(elements, elements, Operand(base_offset));
+ __ ldc1(result, MemOperand(elements));
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ __ lw(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
+ DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
}
}
@@ -3445,7 +3378,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@@ -3510,7 +3443,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
@@ -3523,20 +3456,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), FPU);
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
@@ -3560,8 +3491,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), FPU);
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch1 = scratch0();
@@ -3589,8 +3519,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), FPU);
+void LCodeGen::DoMathRound(LMathRound* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
@@ -3666,16 +3595,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), FPU);
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
__ sqrt_d(result, input);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
- CpuFeatureScope scope(masm(), FPU);
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister temp = ToDoubleRegister(instr->temp());
@@ -3700,7 +3627,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
- CpuFeatureScope scope(masm(), FPU);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@@ -3731,7 +3657,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
- CpuFeatureScope scope(masm(), FPU);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@@ -3808,7 +3733,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister input = ToDoubleRegister(instr->value());
DoubleRegister result = ToDoubleRegister(instr->result());
DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
@@ -3822,7 +3746,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
@@ -3830,7 +3754,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
@@ -3838,7 +3762,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
@@ -3846,7 +3770,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
@@ -3854,42 +3778,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
- default:
- Abort("Unimplemented type of LUnaryMathOperation.");
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(a1));
ASSERT(instr->HasPointerMap());
@@ -4111,7 +3999,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), FPU);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@@ -4185,7 +4072,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- CpuFeatureScope scope(masm(), FPU);
DoubleRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@@ -4489,7 +4375,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
- CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@@ -4507,7 +4392,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
- CpuFeatureScope scope(masm(), FPU);
LOperand* input = instr->value();
LOperand* output = instr->result();
@@ -4569,45 +4453,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register src.
-// Resulting double is returned in registers hiword:loword.
-// This function does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
- Register src,
- Register hiword,
- Register loword,
- Register scratch,
- int leading_zeroes) {
- const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
- const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
- const int mantissa_shift_for_hi_word =
- meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
- const int mantissa_shift_for_lo_word =
- kBitsPerInt - mantissa_shift_for_hi_word;
- masm->li(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
- if (mantissa_shift_for_hi_word > 0) {
- masm->sll(loword, src, mantissa_shift_for_lo_word);
- masm->srl(hiword, src, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- } else {
- masm->mov(loword, zero_reg);
- masm->sll(hiword, src, mantissa_shift_for_hi_word);
- masm->Or(hiword, scratch, hiword);
- }
-
- // If least significant bit of biased exponent was not 1 it was corrupted
- // by most significant bit of mantissa so we should fix that.
- if (!(biased_exponent & 1)) {
- masm->li(scratch, 1 << HeapNumber::kExponentShift);
- masm->nor(scratch, scratch, scratch);
- masm->and_(hiword, hiword, scratch);
- }
-}
-
-
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
@@ -4628,35 +4473,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ Xor(src, src, Operand(0x80000000));
}
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ mtc1(src, dbl_scratch);
- __ cvt_d_w(dbl_scratch, dbl_scratch);
- } else {
- FloatingPointHelper::Destination dest =
- FloatingPointHelper::kCoreRegisters;
- FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, f0,
- sfpd_lo, sfpd_hi,
- scratch0(), f2);
- }
+ __ mtc1(src, dbl_scratch);
+ __ cvt_d_w(dbl_scratch, dbl_scratch);
} else {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ mtc1(src, dbl_scratch);
- __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
- } else {
- Label no_leading_zero, convert_done;
- __ And(at, src, Operand(0x80000000));
- __ Branch(&no_leading_zero, ne, at, Operand(zero_reg));
-
-      // Integer has at least one leading zero.
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 1);
- __ Branch(&convert_done);
-
- __ bind(&no_leading_zero);
- GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, t0, 0);
- __ bind(&convert_done);
- }
+ __ mtc1(src, dbl_scratch);
+ __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
}
if (FLAG_inline_new) {
@@ -4680,13 +4501,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
- __ sw(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
- }
+ __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
__ Addu(dst, dst, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(dst, dst);
}
@@ -4719,39 +4534,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Label no_special_nan_handling;
Label done;
if (convert_hole) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- DoubleRegister input_reg = ToDoubleRegister(instr->value());
- __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
- __ Move(reg, scratch0(), input_reg);
- Label canonicalize;
- __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&canonicalize);
- __ Move(input_reg,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- } else {
- Label not_hole;
- __ Branch(&not_hole, ne, sfpd_hi, Operand(kHoleNanUpper32));
- __ li(reg, factory()->the_hole_value());
- __ Branch(&done);
- __ bind(&not_hole);
- __ And(scratch, sfpd_hi, Operand(0x7ff00000));
- __ Branch(&no_special_nan_handling, ne, scratch, Operand(0x7ff00000));
- Label special_nan_handling;
- __ And(at, sfpd_hi, Operand(0x000FFFFF));
- __ Branch(&special_nan_handling, ne, at, Operand(zero_reg));
- __ Branch(&no_special_nan_handling, eq, sfpd_lo, Operand(zero_reg));
- __ bind(&special_nan_handling);
- double canonical_nan =
- FixedDoubleArray::canonical_not_the_hole_nan_as_double();
- uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
- __ li(sfpd_lo,
- Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
- __ li(sfpd_hi,
- Operand(static_cast<uint32_t>(casted_nan >> 32)));
- }
+ DoubleRegister input_reg = ToDoubleRegister(instr->value());
+ __ BranchF(&no_special_nan_handling, NULL, eq, input_reg, input_reg);
+ __ Move(reg, scratch0(), input_reg);
+ Label canonicalize;
+ __ Branch(&canonicalize, ne, scratch0(), Operand(kHoleNanUpper32));
+ __ li(reg, factory()->the_hole_value());
+ __ Branch(&done);
+ __ bind(&canonicalize);
+ __ Move(input_reg,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double());
}
__ bind(&no_special_nan_handling);
@@ -4765,13 +4557,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm(), FPU);
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- } else {
- __ sw(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
- __ sw(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
- }
+ __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
// Now that we have finished with the object's real address tag it
__ Addu(reg, reg, kHeapObjectTag);
__ bind(&done);
@@ -4821,7 +4607,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
LEnvironment* env,
NumberUntagDMode mode) {
Register scratch = scratch0();
- CpuFeatureScope scope(masm(), FPU);
Label load_smi, heap_number, done;
@@ -4899,7 +4684,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// of the if.
if (instr->truncating()) {
- CpuFeatureScope scope(masm(), FPU);
Register scratch3 = ToRegister(instr->temp2());
FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
@@ -5154,7 +4938,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
@@ -5163,7 +4946,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@@ -5171,7 +4953,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
- CpuFeatureScope vfp_scope(masm(), FPU);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@@ -5207,7 +4988,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@@ -5220,8 +5000,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
- __ LoadHeapObject(prototype_reg,
- prototypes->at(prototypes->length() - 1));
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
@@ -5337,11 +5115,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
- }
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5369,7 +5147,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(size, size);
__ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr);
+ }
__ StoreToSafepointRegisterSlot(v0, result);
}
@@ -5405,7 +5189,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ li(a1, Operand(isolate()->factory()->empty_fixed_array()));
- __ Push(a3, a2, a1);
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@@ -5416,8 +5199,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
+ __ Push(a3, a2, a1);
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ Push(a3, a2, a1);
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
@@ -5430,170 +5215,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(a2));
- ASSERT(!result.is(a2));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ Addu(a2, result, Operand(elements_offset));
- } else {
- __ lw(a2, FieldMemOperand(source, i));
- }
- __ sw(a2, FieldMemOperand(result, object_offset + i));
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ li(a2, Operand(Handle<Map>(isolate()->heap()->
- allocation_site_info_map())));
- __ sw(a2, FieldMemOperand(result, object_size));
- __ sw(source, FieldMemOperand(result, object_size + kPointerSize));
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ lw(a2, FieldMemOperand(source, i));
- __ sw(a2, FieldMemOperand(result, elements_offset + i));
- }
-
- // Copy elements backing store content.
- int elements_length = has_elements ? elements->length() : 0;
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- // We only support little endian mode...
- int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
- int32_t value_high = static_cast<int32_t>(value >> 32);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ li(a2, Operand(value_low));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ li(a2, Operand(value_high));
- __ sw(a2, FieldMemOperand(result, total_offset + 4));
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ Addu(a2, result, Operand(*offset));
- __ sw(a2, FieldMemOperand(result, total_offset));
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(a2, Handle<HeapObject>::cast(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- } else {
- __ li(a2, Operand(value));
- __ sw(a2, FieldMemOperand(result, total_offset));
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- // Load map into a2.
- __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ lbu(a2, FieldMemOperand(a2, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ Ext(a2, a2, Map::kElementsKindShift, Map::kElementsKindBitCount);
- DeoptimizeIf(ne, instr->environment(), a2,
- Operand(boilerplate_elements_kind));
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ li(a0, Operand(Smi::FromInt(size)));
- __ push(a0);
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(a1, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), v0, a1, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
ASSERT(ToRegister(instr->result()).is(v0));
Handle<FixedArray> literals(instr->environment()->closure()->literals());
@@ -5691,7 +5312,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
+ FastNewClosureStub stub(shared_info->language_mode(),
+ shared_info->is_generator());
__ li(a1, Operand(shared_info));
__ push(a1);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index bb2003f1d..a1bc4ba4d 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -132,7 +132,7 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
@@ -289,17 +289,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DoubleRegister ToDoubleRegister(int index) const;
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
diff --git a/deps/v8/src/mips/lithium-gap-resolver-mips.cc b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
index b41515673..87efae5f4 100644
--- a/deps/v8/src/mips/lithium-gap-resolver-mips.cc
+++ b/deps/v8/src/mips/lithium-gap-resolver-mips.cc
@@ -172,10 +172,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ lw(kLithiumScratchReg, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@@ -195,11 +193,9 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ sw(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ mov_d(cgen_->ToDoubleRegister(saved_destination_),
kLithiumScratchDouble);
} else if (saved_destination_->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
__ sdc1(kLithiumScratchDouble,
cgen_->ToMemOperand(saved_destination_));
} else {
@@ -236,7 +232,6 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsInt16Encodable()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
// 'at' is overwritten while saving the value to the destination.
// Therefore we can't use 'at'. It is OK if the read from the source
// destroys 'at', since that happens before the value is read.
@@ -276,7 +271,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
DoubleRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ mov_d(cgen_->ToDoubleRegister(destination), source_register);
@@ -287,7 +281,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
- CpuFeatureScope scope(cgen_->masm(), FPU);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 652c7cad0..afa806c4e 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -302,17 +302,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1123,50 +1112,103 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), f4);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
- return MarkAsCall(DefineFixedDouble(result, f4), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
- } else if (op == kMathPowHalf) {
- // Input cannot be the same as the result.
- // See lithium-codegen-mips.cc::DoMathPowHalf.
- LOperand* input = UseFixedDouble(instr->value(), f8);
- LOperand* temp = FixedTemp(f6);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- return DefineFixedDouble(result, f4);
- } else {
- LOperand* input = UseRegister(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
- (op == kMathFloor) ? TempRegister() : NULL;
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathFloor:
- return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
- case kMathSqrt:
- return DefineAsRegister(result);
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), f4);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* input = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
+ LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+ LOperand* input = UseFixedDouble(instr->value(), f8);
+ LOperand* temp = FixedTemp(f6);
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+ return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = TempRegister();
+ LMathFloor* result = new(zone()) LMathFloor(input, temp);
+ return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegister(instr->value());
+ LOperand* temp = FixedTemp(f6);
+ LMathRound* result = new(zone()) LMathRound(input, temp);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
@@ -1807,7 +1849,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
+ return AssignEnvironment(result);
}
@@ -2008,16 +2050,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- // float->double conversion on non-VFP2 requires an extra scratch
- // register. For convenience, just mark the elements register as "UseTemp"
- // so that it can be used as a temp during the float->double conversion
- // after it's no longer needed after the float load.
- bool needs_temp =
- !CpuFeatures::IsSupported(FPU) &&
- (elements_kind == EXTERNAL_FLOAT_ELEMENTS);
- LOperand* external_pointer = needs_temp
- ? UseTempRegister(instr->elements())
- : UseRegister(instr->elements());
+ LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@@ -2217,11 +2250,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, v0), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, v0), instr);
}
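
The DoUnaryMathOperation rewrite above replaces the single catch-all LUnaryMathOperation with a switch that hands each BuiltinFunctionId to a dedicated builder (DoMathFloor, DoMathRound, and so on). A minimal sketch of that dispatch-by-enum shape, using hypothetical names in place of the real LChunkBuilder machinery:

    // Hypothetical stand-ins for the builder methods; the point is the
    // one-switch, one-handler-per-operation structure.
    enum class MathOp { kFloor, kRound, kAbs, kLog, kSqrt };

    struct Instr { MathOp op; };

    const char* BuildMathInstruction(const Instr& instr) {
      switch (instr.op) {
        case MathOp::kFloor: return "math-floor";
        case MathOp::kRound: return "math-round";
        case MathOp::kAbs:   return "math-abs";
        case MathOp::kLog:   return "math-log";
        case MathOp::kSqrt:  return "math-sqrt";
      }
      return nullptr;  // unreachable with a well-formed op
    }

The payoff is visible in the new builders above: each operation picks its own register constraints and temps (fixed f4 for the runtime calls, a FixedTemp(f6) for round, and so on) without threading them through one shared constructor.
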
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index e99590545..b0fc59a3b 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -97,7 +97,6 @@ class LCodeGen;
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@@ -134,8 +133,17 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
+ V(MathAbs) \
+ V(MathCos) \
V(MathExp) \
+ V(MathFloor) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@@ -178,7 +186,6 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf) \
V(ForInPrepareMap) \
@@ -663,9 +670,9 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
+class LMathFloor: public LTemplateInstruction<1, 1, 1> {
public:
- LUnaryMathOperation(LOperand* value, LOperand* temp) {
+ LMathFloor(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@@ -673,11 +680,84 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathRound: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathRound(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathAbs(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathSin: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathCos: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
@@ -700,8 +780,32 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
- virtual void PrintDataTo(StringStream* stream);
+
+class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
+ public:
+ LMathPowHalf(LOperand* value, LOperand* temp) {
+ inputs_[0] = value;
+ temps_[0] = temp;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+ LOperand* temp() { return temps_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@@ -1255,7 +1359,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -2184,7 +2288,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2300,13 +2404,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -2550,6 +2647,17 @@ class LChunkBuilder BASE_EMBEDDED {
LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
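
In the new header every math operation is its own concrete instruction, and the LTemplateInstruction<results, inputs, temps> parameters state how many operands each one carries: LMathAbs, LMathSqrt and the trig/log instructions take only a value, while LMathFloor, LMathRound and LMathPowHalf also reserve one temp. A hedged sketch of that encoding with a simplified template (not the real Lithium base class):

    #include <array>

    // Simplified: fixed-size operand arrays whose lengths come from the
    // template parameters, mirroring how inputs_[] and temps_[] are declared.
    template <int kResults, int kInputs, int kTemps>
    struct TemplateInstruction {
      std::array<void*, kInputs> inputs_{};
      std::array<void*, kTemps> temps_{};
    };

    struct MathAbsLike : TemplateInstruction<1, 1, 0> {};    // value only
    struct MathFloorLike : TemplateInstruction<1, 1, 1> {};  // value + temp
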
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 12e102504..6f9891469 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -369,7 +369,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
// Read the first word and compare to the native_context_map.
lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
@@ -385,7 +384,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
- // TODO(119): Avoid push(holder_reg)/pop(holder_reg).
push(holder_reg); // Temporarily save holder on the stack.
mov(holder_reg, at); // Move at to its holding place.
LoadRoot(at, Heap::kNullValueRootIndex);
@@ -853,7 +851,6 @@ void MacroAssembler::MultiPopReversed(RegList regs) {
void MacroAssembler::MultiPushFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -868,7 +865,6 @@ void MacroAssembler::MultiPushFPU(RegList regs) {
void MacroAssembler::MultiPushReversedFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t num_to_push = NumberOfBitsSet(regs);
int16_t stack_offset = num_to_push * kDoubleSize;
@@ -883,7 +879,6 @@ void MacroAssembler::MultiPushReversedFPU(RegList regs) {
void MacroAssembler::MultiPopFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = 0; i < kNumRegisters; i++) {
@@ -897,7 +892,6 @@ void MacroAssembler::MultiPopFPU(RegList regs) {
void MacroAssembler::MultiPopReversedFPU(RegList regs) {
- CpuFeatureScope scope(this, FPU);
int16_t stack_offset = 0;
for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
@@ -1170,7 +1164,6 @@ void MacroAssembler::BranchF(Label* target,
void MacroAssembler::Move(FPURegister dst, double imm) {
- ASSERT(IsEnabled(FPU));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@@ -1340,61 +1333,17 @@ void MacroAssembler::ConvertToInt32(Register source,
Subu(scratch2, scratch2, Operand(zero_exponent));
// Dest already has a Smi zero.
Branch(&done, lt, scratch2, Operand(zero_reg));
- if (!CpuFeatures::IsSupported(FPU)) {
- // We have a shifted exponent between 0 and 30 in scratch2.
- srl(dest, scratch2, HeapNumber::kExponentShift);
- // We now have the exponent in dest. Subtract from 30 to get
- // how much to shift down.
- li(at, Operand(30));
- subu(dest, at, dest);
- }
bind(&right_exponent);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(this, FPU);
- // MIPS FPU instructions implementing double precision to integer
- // conversion using round to zero. Since the FP value was qualified
- // above, the resulting integer should be a legal int32.
- // The original 'Exponent' word is still in scratch.
- lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
- trunc_w_d(double_scratch, double_scratch);
- mfc1(dest, double_scratch);
- } else {
- // On entry, dest has final downshift, scratch has original sign/exp/mant.
- // Save sign bit in top bit of dest.
- And(scratch2, scratch, Operand(0x80000000));
- Or(dest, dest, Operand(scratch2));
- // Put back the implicit 1, just above mantissa field.
- Or(scratch, scratch, Operand(1 << HeapNumber::kExponentShift));
-
- // Shift up the mantissa bits to take up the space the exponent used to
- // take. We just orred in the implicit bit so that took care of one and
- // we want to leave the sign bit 0 so we subtract 2 bits from the shift
- // distance. But we want to clear the sign-bit so shift one more bit
- // left, then shift right one bit.
- const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
- sll(scratch, scratch, shift_distance + 1);
- srl(scratch, scratch, 1);
-
- // Get the second half of the double. For some exponents we don't
- // actually need this because the bits get shifted out again, but
- // it's probably slower to test than just to do it.
- lw(scratch2, FieldMemOperand(source, HeapNumber::kMantissaOffset));
- // Extract the top 10 bits, and insert those bottom 10 bits of scratch.
- // The width of the field here is the same as the shift amount above.
- const int field_width = shift_distance;
- Ext(scratch2, scratch2, 32-shift_distance, field_width);
- Ins(scratch, scratch2, 0, field_width);
- // Move down according to the exponent.
- srlv(scratch, scratch, dest);
- // Prepare the negative version of our integer.
- subu(scratch2, zero_reg, scratch);
- // Trick to check sign bit (msb) held in dest, count leading zero.
- // 0 indicates negative, save negative version with conditional move.
- Clz(dest, dest);
- Movz(scratch, scratch2, dest);
- mov(dest, scratch);
- }
+
+ // MIPS FPU instructions implementing double precision to integer
+ // conversion using round to zero. Since the FP value was qualified
+ // above, the resulting integer should be a legal int32.
+ // The original 'Exponent' word is still in scratch.
+ lwc1(double_scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
+ mtc1(scratch, FPURegister::from_code(double_scratch.code() + 1));
+ trunc_w_d(double_scratch, double_scratch);
+ mfc1(dest, double_scratch);
+
bind(&done);
}
@@ -1410,8 +1359,6 @@ void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
ASSERT(!double_input.is(double_scratch));
ASSERT(!except_flag.is(scratch));
- ASSERT(CpuFeatures::IsSupported(FPU));
- CpuFeatureScope scope(this, FPU);
Label done;
// Clear the except flag (0 = no exception)
@@ -1553,7 +1500,6 @@ void MacroAssembler::EmitECMATruncate(Register result,
Register scratch,
Register scratch2,
Register scratch3) {
- CpuFeatureScope scope(this, FPU);
ASSERT(!scratch2.is(result));
ASSERT(!scratch3.is(result));
ASSERT(!scratch3.is(scratch2));
@@ -3461,11 +3407,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ destination = FloatingPointHelper::kFPURegisters;
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
@@ -3478,7 +3420,6 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
scratch4,
f2);
if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(this, FPU);
sdc1(f0, MemOperand(scratch1, 0));
} else {
sw(mantissa_reg, MemOperand(scratch1, 0));
@@ -3571,7 +3512,6 @@ void MacroAssembler::CheckMap(Register obj,
void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
- CpuFeatureScope scope(this, FPU);
if (IsMipsSoftFloatABI) {
Move(dst, v0, v1);
} else {
@@ -3581,7 +3521,6 @@ void MacroAssembler::GetCFunctionDoubleResult(const DoubleRegister dst) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
} else {
@@ -3592,7 +3531,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
DoubleRegister dreg2) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
if (dreg2.is(f12)) {
ASSERT(!dreg1.is(f14));
@@ -3611,7 +3549,6 @@ void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DoubleRegister dreg,
Register reg) {
- CpuFeatureScope scope(this, FPU);
if (!IsMipsSoftFloatABI) {
Move(f12, dreg);
Move(a2, reg);
@@ -4254,10 +4191,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
PrepareCEntryArgs(function->nargs);
PrepareCEntryFunction(ExternalReference(function, isolate()));
- SaveFPRegsMode mode = CpuFeatures::IsSupported(FPU)
- ? kSaveFPRegs
- : kDontSaveFPRegs;
- CEntryStub stub(1, mode);
+ CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@@ -4649,7 +4583,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles,
const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
if (save_doubles) {
- CpuFeatureScope scope(this, FPU);
// The stack must be aligned to 0 modulo 8 for stores with sdc1.
ASSERT(kDoubleSize == frame_alignment);
if (frame_alignment > 0) {
@@ -4687,7 +4620,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
bool do_return) {
// Optionally restore all double registers.
if (save_doubles) {
- CpuFeatureScope scope(this, FPU);
// Remember: we only need to restore every 2nd double FPU value.
lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset));
for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) {
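
ConvertToInt32 above now relies unconditionally on trunc_w_d, a round-toward-zero conversion of a double that the preceding exponent checks have already qualified as fitting into an int32. The same semantics in portable C++, as an illustrative helper rather than anything emitted by the macro assembler:

    #include <cmath>
    #include <cstdint>

    // Valid only under the precondition established above: the value is finite
    // and its truncated result fits in the int32 range.
    int32_t RoundTowardZero(double value) {
      return static_cast<int32_t>(std::trunc(value));
    }
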
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index 5eb79eb78..bc384357c 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -867,7 +867,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
- memcpy(cached_line, line, CachePage::kLineLength);
+ OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@@ -1059,8 +1059,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+ OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+ OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
@@ -1091,12 +1091,14 @@ double Simulator::get_fpu_register_double(int fpureg) const {
}
-// For use in calls that take two double values, constructed either
+// Runtime FP routines take up to two double arguments and zero
+// or one integer argument. All are constructed here,
// from a0-a3 or f12 and f14.
-void Simulator::GetFpArgs(double* x, double* y) {
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (!IsMipsSoftFloatABI) {
*x = get_fpu_register_double(12);
*y = get_fpu_register_double(14);
+ *z = get_register(a2);
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
@@ -1106,53 +1108,14 @@ void Simulator::GetFpArgs(double* x, double* y) {
// Registers a0 and a1 -> x.
reg_buffer[0] = get_register(a0);
reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
+ OS::MemCopy(x, buffer, sizeof(buffer));
// Registers a2 and a3 -> y.
reg_buffer[0] = get_register(a2);
reg_buffer[1] = get_register(a3);
- memcpy(y, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value, constructed either
-// from a0 and a1 or f12.
-void Simulator::GetFpArgs(double* x) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers a0 and a1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
- }
-}
-
-
-// For use in calls that take one double value constructed either
-// from a0 and a1 or f12 and one integer value.
-void Simulator::GetFpArgs(double* x, int32_t* y) {
- if (!IsMipsSoftFloatABI) {
- *x = get_fpu_register_double(12);
- *y = get_register(a2);
- } else {
- // We use a char buffer to get around the strict-aliasing rules which
- // otherwise allow the compiler to optimize away the copy.
- char buffer[sizeof(*x)];
- int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- // Registers 0 and 1 -> x.
- reg_buffer[0] = get_register(a0);
- reg_buffer[1] = get_register(a1);
- memcpy(x, buffer, sizeof(buffer));
-
- // Register 2 -> y.
+ OS::MemCopy(y, buffer, sizeof(buffer));
+ // Register 2 -> z.
reg_buffer[0] = get_register(a2);
- memcpy(y, buffer, sizeof(*y));
+ OS::MemCopy(z, buffer, sizeof(*z));
}
}
@@ -1164,7 +1127,7 @@ void Simulator::SetFpResult(const double& result) {
} else {
char buffer[2 * sizeof(registers_[0])];
int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
- memcpy(buffer, &result, sizeof(buffer));
+ OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to v0 and v1.
set_register(v0, reg_buffer[0]);
set_register(v1, reg_buffer[1]);
@@ -1415,10 +1378,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
-typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
- int32_t arg1,
- int32_t arg2,
- int32_t arg3);
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct call in to API function native callback
// (refer to InvocationCallback in v8.h).
@@ -1495,46 +1460,81 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// simulator. Soft-float has additional abstraction of ExternalReference,
// to support serialization.
if (fp_call) {
- SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ double dval0, dval1; // one or two double parameters
+ int32_t ival; // zero or one integer parameter
+ int64_t iresult = 0; // integer return value
+ double dresult = 0; // double return value
+ GetFpArgs(&dval0, &dval1, &ival);
+ SimulatorRuntimeCall generic_target =
+ reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim) {
- double dval0, dval1;
- int32_t ival;
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
- GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
- FUNCTION_ADDR(target), dval0, dval1);
+ FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
- GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
- FUNCTION_ADDR(target), dval0);
+ FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
- GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
- FUNCTION_ADDR(target), dval0, ival);
+ FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
break;
}
}
- if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL: {
+ SimulatorRuntimeCompareCall target =
+ reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+ iresult = target(dval0, dval1);
+ set_register(v0, static_cast<int32_t>(iresult));
+ set_register(v1, static_cast<int32_t>(iresult >> 32));
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_FP_CALL: {
+ SimulatorRuntimeFPFPCall target =
+ reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+ dresult = target(dval0, dval1);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
- reinterpret_cast<SimulatorRuntimeFPCall>(external);
- double result = target(arg0, arg1, arg2, arg3);
- SetFpResult(result);
- } else {
- SimulatorRuntimeCall target =
- reinterpret_cast<SimulatorRuntimeCall>(external);
- uint64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
- int32_t gpreg_pair[2];
- memcpy(&gpreg_pair[0], &result, 2 * sizeof(int32_t));
- set_register(v0, gpreg_pair[0]);
- set_register(v1, gpreg_pair[1]);
+ reinterpret_cast<SimulatorRuntimeFPCall>(external);
+ dresult = target(dval0);
+ SetFpResult(dresult);
+ break;
+ }
+ case ExternalReference::BUILTIN_FP_INT_CALL: {
+ SimulatorRuntimeFPIntCall target =
+ reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+ dresult = target(dval0, ival);
+ SetFpResult(dresult);
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ if (::v8::internal::FLAG_trace_sim) {
+ switch (redirection->type()) {
+ case ExternalReference::BUILTIN_COMPARE_CALL:
+ PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+ break;
+ case ExternalReference::BUILTIN_FP_FP_CALL:
+ case ExternalReference::BUILTIN_FP_CALL:
+ case ExternalReference::BUILTIN_FP_INT_CALL:
+ PrintF("Returned %f\n", dresult);
+ break;
+ default:
+ UNREACHABLE();
+ break;
+ }
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
// See DirectCEntryStub::GenerateCall for explanation of register usage.
@@ -2867,9 +2867,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) {
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
+ OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(a0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
+ OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(a2, buffer);
}
CallInternal(entry);
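
The consolidated GetFpArgs now always fills two doubles and one integer, reading FPU registers directly or, under the soft-float ABI, reassembling each double from a general-purpose register pair through a char buffer so the compiler cannot optimize the copy away under strict-aliasing rules. The register-pair trick in isolation (hypothetical helper; which half goes where follows the target's endianness):

    #include <cstdint>
    #include <cstring>

    // Rebuild a double from the two 32-bit halves held in a register pair.
    // memcpy through a local buffer is the standards-safe way to type-pun.
    double DoubleFromRegisterPair(int32_t lo, int32_t hi) {
      int32_t pair[2] = {lo, hi};
      double result;
      std::memcpy(&result, pair, sizeof(result));
      return result;
    }
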
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index ed6344342..a091e5fb2 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -351,10 +351,8 @@ class Simulator {
static void* RedirectExternalReference(void* external_function,
ExternalReference::Type type);
- // For use in calls that take double value arguments.
- void GetFpArgs(double* x, double* y);
- void GetFpArgs(double* x);
- void GetFpArgs(double* x, int32_t* y);
+ // Handle arguments and return value for runtime FP functions.
+ void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void CallInternal(byte* entry);
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index da6770a14..b9757fa13 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -410,29 +410,46 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
-// Generate StoreField code, value is passed in a0 register.
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ li(scratch, Operand(cell));
+ __ lw(scratch,
+ FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
+ __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+ __ Branch(miss, ne, scratch, Operand(at));
+}
+
+
+// Generate StoreTransition code, value is passed in a0 register.
// After executing generated code, the receiver_reg and name_reg
// may be clobbered.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- Label* miss_restore_name) {
+void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label,
+ Label* miss_restore_name) {
// a0 : value.
Label exit;
// Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
- DO_SMI_CHECK, mode);
+ DO_SMI_CHECK, REQUIRE_EXACT_MAP);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -440,7 +457,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
@@ -458,12 +475,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
- if (lookup->holder() == *object &&
- !holder->HasFastProperties() &&
- !holder->IsJSGlobalProxy() &&
- !holder->IsJSGlobalObject()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ if (lookup->holder() == *object) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm,
+ Handle<GlobalObject>(GlobalObject::cast(holder)),
+ name,
+ scratch1,
+ miss_restore_name);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ }
}
}
@@ -472,7 +495,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@@ -485,33 +508,114 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- int index;
- if (!transition.is_null()) {
- // Update the map of the object.
- __ li(scratch1, Operand(transition));
- __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+ // Update the map of the object.
+ __ li(scratch1, Operand(transition));
+ __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Pass the now unused name_reg as a scratch register.
+ __ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
+ offset,
name_reg,
+ scratch1,
kRAHasNotBeenSaved,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
+ kDontSaveFPRegs);
} else {
- index = lookup->GetFieldIndex().field_index();
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array
+ __ lw(scratch1,
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ sw(value_reg, FieldMemOperand(scratch1, offset));
+
+ // Skip updating write barrier if storing a smi.
+ __ JumpIfSmi(value_reg, &exit);
+
+ // Update the write barrier for the array address.
+ // Ok to clobber receiver_reg and name_reg, since we return.
+ __ mov(name_reg, value_reg);
+ __ RecordWriteField(scratch1,
+ offset,
+ name_reg,
+ receiver_reg,
+ kRAHasNotBeenSaved,
+ kDontSaveFPRegs);
+ }
+
+ // Return the value (register v0).
+ ASSERT(value_reg.is(a0));
+ __ bind(&exit);
+ __ mov(v0, a0);
+ __ Ret();
+}
+
+
+// Generate StoreField code, value is passed in a0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // a0 : value
+ Label exit;
+
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
+ DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -926,26 +1030,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ li(scratch, Operand(cell));
- __ lw(scratch,
- FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
- __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
- __ Branch(miss, ne, scratch, Operand(at));
-}
-
-
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@@ -975,72 +1059,12 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
- Register fval,
- Register scratch1,
- Register scratch2) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ mtc1(ival, f0);
- __ cvt_s_w(f0, f0);
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ swc1(f0, MemOperand(scratch1, 0));
- } else {
- // FPU is not available, do manual conversions.
-
- Label not_special, done;
- // Move sign bit from source to destination. This works because the sign
- // bit in the exponent word of the double has the same position and polarity
- // as the 2's complement sign bit in a Smi.
- ASSERT(kBinary32SignMask == 0x80000000u);
-
- __ And(fval, ival, Operand(kBinary32SignMask));
- // Negate value if it is negative.
- __ subu(scratch1, zero_reg, ival);
- __ Movn(ival, scratch1, fval);
-
- // We have -1, 0 or 1, which we treat specially. Register ival contains
- // absolute value: it is either equal to 1 (special case of -1 and 1),
- // greater than 1 (not a special case) or less than 1 (special case of 0).
- __ Branch(&not_special, gt, ival, Operand(1));
-
- // For 1 or -1 we need to or in the 0 exponent (biased).
- static const uint32_t exponent_word_for_1 =
- kBinary32ExponentBias << kBinary32ExponentShift;
-
- __ Xor(scratch1, ival, Operand(1));
- __ li(scratch2, exponent_word_for_1);
- __ or_(scratch2, fval, scratch2);
- __ Movz(fval, scratch2, scratch1); // Only if ival is equal to 1.
- __ Branch(&done);
-
- __ bind(&not_special);
- // Count leading zeros.
- // Gets the wrong answer for 0, but we already checked for that case above.
- Register zeros = scratch2;
- __ Clz(zeros, ival);
-
- // Compute exponent and or it into the exponent register.
- __ li(scratch1, (kBitsPerInt - 1) + kBinary32ExponentBias);
- __ subu(scratch1, scratch1, zeros);
-
- __ sll(scratch1, scratch1, kBinary32ExponentShift);
- __ or_(fval, fval, scratch1);
-
- // Shift up the source chopping the top bit off.
- __ Addu(zeros, zeros, Operand(1));
- // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
- __ sllv(ival, ival, zeros);
- // And the top (top 20 bits).
- __ srl(scratch1, ival, kBitsPerInt - kBinary32MantissaBits);
- __ or_(fval, fval, scratch1);
-
- __ bind(&done);
-
- __ sll(scratch1, wordoffset, 2);
- __ addu(scratch1, dst, scratch1);
- __ sw(fval, MemOperand(scratch1, 0));
- }
+ Register scratch1) {
+ __ mtc1(ival, f0);
+ __ cvt_s_w(f0, f0);
+ __ sll(scratch1, wordoffset, 2);
+ __ addu(scratch1, dst, scratch1);
+ __ swc1(f0, MemOperand(scratch1, 0));
}
@@ -1229,7 +1253,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
Handle<GlobalObject> global) {
Label miss;
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1237,13 +1261,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- if (!last->HasFastProperties()) {
- __ lw(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
- __ lw(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
- __ Branch(&miss, ne, scratch2(),
- Operand(isolate()->factory()->null_value()));
- }
-
HandlerFrontendFooter(success, &miss);
}
@@ -2100,11 +2117,7 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
- if (!CpuFeatures::IsSupported(FPU)) {
- return Handle<Code>::null();
- }
- CpuFeatureScope scope_fpu(masm(), FPU);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@@ -3170,36 +3183,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
-static bool IsElementTypeSigned(ElementsKind elements_kind) {
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- return true;
-
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- return false;
-
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- return false;
- }
- return false;
-}
-
-
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
@@ -3207,36 +3190,30 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
FPURegister double_scratch0,
FPURegister double_scratch1,
Label* fail) {
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- Label key_ok;
- // Check for smi or a smi inside a heap number. We convert the heap
- // number and check if the conversion is exact and fits into the smi
- // range.
- __ JumpIfSmi(key, &key_ok);
- __ CheckMap(key,
- scratch0,
- Heap::kHeapNumberMapRootIndex,
- fail,
- DONT_DO_SMI_CHECK);
- __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
- __ EmitFPUTruncate(kRoundToZero,
- scratch0,
- double_scratch0,
- at,
- double_scratch1,
- scratch1,
- kCheckForInexactConversion);
-
- __ Branch(fail, ne, scratch1, Operand(zero_reg));
-
- __ SmiTagCheckOverflow(key, scratch0, scratch1);
- __ BranchOnOverflow(fail, scratch1);
- __ bind(&key_ok);
- } else {
- // Check that the key is a smi.
- __ JumpIfNotSmi(key, fail);
- }
+ Label key_ok;
+ // Check for smi or a smi inside a heap number. We convert the heap
+ // number and check if the conversion is exact and fits into the smi
+ // range.
+ __ JumpIfSmi(key, &key_ok);
+ __ CheckMap(key,
+ scratch0,
+ Heap::kHeapNumberMapRootIndex,
+ fail,
+ DONT_DO_SMI_CHECK);
+ __ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
+ __ EmitFPUTruncate(kRoundToZero,
+ scratch0,
+ double_scratch0,
+ at,
+ double_scratch1,
+ scratch1,
+ kCheckForInexactConversion);
+
+ __ Branch(fail, ne, scratch1, Operand(zero_reg));
+
+ __ SmiTagCheckOverflow(key, scratch0, scratch1);
+ __ BranchOnOverflow(fail, scratch1);
+ __ bind(&key_ok);
}
@@ -3327,29 +3304,19 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(t0, key);
- StoreIntAsFloat(masm, a3, t0, t1, t2, t3, t4);
+ StoreIntAsFloat(masm, a3, t0, t1, t2);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ sll(t8, key, 2);
__ addu(a3, a3, t8);
// a3: effective address of the double element
FloatingPointHelper::Destination destination;
- if (CpuFeatures::IsSupported(FPU)) {
- destination = FloatingPointHelper::kFPURegisters;
- } else {
- destination = FloatingPointHelper::kCoreRegisters;
- }
+ destination = FloatingPointHelper::kFPURegisters;
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
t0, f2); // These are: scratch2, single_scratch.
- if (destination == FloatingPointHelper::kFPURegisters) {
- CpuFeatureScope scope(masm, FPU);
- __ sdc1(f0, MemOperand(a3, 0));
- } else {
- __ sw(t2, MemOperand(a3, 0));
- __ sw(t3, MemOperand(a3, Register::kSizeInBytes));
- }
+ __ sdc1(f0, MemOperand(a3, 0));
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@@ -3381,232 +3348,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(masm, FPU);
- __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
+ __ ldc1(f0, FieldMemOperand(a0, HeapNumber::kValueOffset));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ cvt_s_d(f0, f0);
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ swc1(f0, MemOperand(t8, 0));
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- __ sdc1(f0, MemOperand(t8, 0));
- } else {
- __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
-
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
- }
-
- // Entry registers are intact, a0 holds the value
- // which is the return value.
- __ mov(v0, a0);
- __ Ret();
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ cvt_s_d(f0, f0);
+ __ sll(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ swc1(f0, MemOperand(t8, 0));
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ sll(t8, key, 2);
+ __ addu(t8, a3, t8);
+ __ sdc1(f0, MemOperand(t8, 0));
} else {
- // FPU is not available, do manual conversions.
-
- __ lw(t3, FieldMemOperand(value, HeapNumber::kExponentOffset));
- __ lw(t4, FieldMemOperand(value, HeapNumber::kMantissaOffset));
-
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- Label done, nan_or_infinity_or_zero;
- static const int kMantissaInHiWordShift =
- kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
- static const int kMantissaInLoWordShift =
- kBitsPerInt - kMantissaInHiWordShift;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Branch(&nan_or_infinity_or_zero, eq, t6, Operand(zero_reg));
-
- __ xor_(t1, t6, t5);
- __ li(t2, kBinary32ExponentMask);
- __ Movz(t6, t2, t1); // Only if t6 is equal to t5.
- __ Branch(&nan_or_infinity_or_zero, eq, t1, Operand(zero_reg));
-
- // Rebias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Addu(t6,
- t6,
- Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
-
- __ li(t1, Operand(kBinary32MaxExponent));
- __ Slt(t1, t1, t6);
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Or(t2, t2, Operand(kBinary32ExponentMask));
- __ Movn(t3, t2, t1); // Only if t6 is gt kBinary32MaxExponent.
- __ Branch(&done, gt, t6, Operand(kBinary32MaxExponent));
-
- __ Slt(t1, t6, Operand(kBinary32MinExponent));
- __ And(t2, t3, Operand(HeapNumber::kSignMask));
- __ Movn(t3, t2, t1); // Only if t6 is lt kBinary32MinExponent.
- __ Branch(&done, lt, t6, Operand(kBinary32MinExponent));
-
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t7, t7, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t7, t7, t4);
- __ sll(t6, t6, kBinary32ExponentShift);
- __ or_(t3, t7, t6);
-
- __ bind(&done);
- __ sll(t9, key, 1);
- __ addu(t9, a3, t9);
- __ sw(t3, MemOperand(t9, 0));
-
- // Entry registers are intact, a0 holds the value which is the return
- // value.
- __ mov(v0, a0);
- __ Ret();
-
- __ bind(&nan_or_infinity_or_zero);
- __ And(t7, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ or_(t6, t6, t7);
- __ sll(t3, t3, kMantissaInHiWordShift);
- __ or_(t6, t6, t3);
- __ srl(t4, t4, kMantissaInLoWordShift);
- __ or_(t3, t6, t4);
- __ Branch(&done);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ sll(t8, key, 2);
- __ addu(t8, a3, t8);
- // t8: effective address of destination element.
- __ sw(t4, MemOperand(t8, 0));
- __ sw(t3, MemOperand(t8, Register::kSizeInBytes));
- __ mov(v0, a0);
- __ Ret();
- } else {
- bool is_signed_type = IsElementTypeSigned(elements_kind);
- int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
- int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
-
- Label done, sign;
-
- // Test for all special exponent values: zeros, subnormal numbers, NaNs
- // and infinities. All these should be converted to 0.
- __ li(t5, HeapNumber::kExponentMask);
- __ and_(t6, t3, t5);
- __ Movz(t3, zero_reg, t6); // Only if t6 is equal to zero.
- __ Branch(&done, eq, t6, Operand(zero_reg));
-
- __ xor_(t2, t6, t5);
- __ Movz(t3, zero_reg, t2); // Only if t6 is equal to t5.
- __ Branch(&done, eq, t6, Operand(t5));
-
- // Unbias exponent.
- __ srl(t6, t6, HeapNumber::kExponentShift);
- __ Subu(t6, t6, Operand(HeapNumber::kExponentBias));
- // If exponent is negative then result is 0.
- __ slt(t2, t6, zero_reg);
- __ Movn(t3, zero_reg, t2); // Only if exponent is negative.
- __ Branch(&done, lt, t6, Operand(zero_reg));
-
- // If exponent is too big then result is minimal value.
- __ slti(t1, t6, meaningfull_bits - 1);
- __ li(t2, min_value);
- __ Movz(t3, t2, t1); // Only if t6 is ge meaningfull_bits - 1.
- __ Branch(&done, ge, t6, Operand(meaningfull_bits - 1));
-
- __ And(t5, t3, Operand(HeapNumber::kSignMask));
- __ And(t3, t3, Operand(HeapNumber::kMantissaMask));
- __ Or(t3, t3, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
-
- __ li(t9, HeapNumber::kMantissaBitsInTopWord);
- __ subu(t6, t9, t6);
- __ slt(t1, t6, zero_reg);
- __ srlv(t2, t3, t6);
- __ Movz(t3, t2, t1); // Only if t6 is positive.
- __ Branch(&sign, ge, t6, Operand(zero_reg));
-
- __ subu(t6, zero_reg, t6);
- __ sllv(t3, t3, t6);
- __ li(t9, meaningfull_bits);
- __ subu(t6, t9, t6);
- __ srlv(t4, t4, t6);
- __ or_(t3, t3, t4);
-
- __ bind(&sign);
- __ subu(t2, t3, zero_reg);
- __ Movz(t3, t2, t5); // Only if t5 is zero.
-
- __ bind(&done);
-
- // Result is in t3.
- // This switch block should be exactly the same as above (FPU mode).
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ srl(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sb(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ addu(t8, a3, key);
- __ sh(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ sll(t8, key, 1);
- __ addu(t8, a3, t8);
- __ sw(t3, MemOperand(t8, 0));
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+ __ EmitECMATruncate(t3, f0, f2, t2, t1, t5);
+
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ srl(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ sb(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ addu(t8, a3, key);
+ __ sh(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ sll(t8, key, 1);
+ __ addu(t8, a3, t8);
+ __ sw(t3, MemOperand(t8, 0));
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
}
}
+
+ // Entry registers are intact, a0 holds the value
+ // which is the return value.
+ __ mov(v0, a0);
+ __ Ret();
}
// Slow case, key and receiver still in a0 and a1.
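
The GenerateStoreTransition/GenerateStoreField split above carries the same field-offset arithmetic in both paths: the field index is rebased against the in-object property count, negative indices land inside the object itself, and non-negative ones go to the out-of-object properties array. That arithmetic in isolation, with hypothetical struct and field names standing in for the Map and FixedArray constants:

    #include <cstddef>

    // Illustrative offset computation matching the comments above; names are
    // hypothetical, the constants correspond to kPointerSize, the instance
    // size, the in-object property count and FixedArray::kHeaderSize.
    struct LayoutInfo {
      int inobject_properties;
      int instance_size;        // bytes
      int pointer_size;
      int fixed_array_header;
    };

    ptrdiff_t FieldOffset(const LayoutInfo& l, int field_index, bool* in_object) {
      int index = field_index - l.inobject_properties;
      *in_object = index < 0;
      return *in_object
          ? l.instance_size + index * l.pointer_size
          : index * l.pointer_size + l.fixed_array_header;
    }

The TODO(verwaest) notes in both copies flag exactly this duplication as a candidate for a shared code stub.
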
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index 44cab53cc..24730a041 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -139,6 +139,9 @@ void HeapObject::HeapObjectVerify() {
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
JSObject::cast(this)->JSObjectVerify();
break;
+ case JS_GENERATOR_OBJECT_TYPE:
+ JSGeneratorObject::cast(this)->JSGeneratorObjectVerify();
+ break;
case JS_MODULE_TYPE:
JSModule::cast(this)->JSModuleVerify();
break;
@@ -198,6 +201,9 @@ void HeapObject::HeapObjectVerify() {
case JS_ARRAY_BUFFER_TYPE:
JSArrayBuffer::cast(this)->JSArrayBufferVerify();
break;
+ case JS_TYPED_ARRAY_TYPE:
+ JSTypedArray::cast(this)->JSTypedArrayVerify();
+ break;
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
@@ -404,6 +410,17 @@ void FixedDoubleArray::FixedDoubleArrayVerify() {
}
+void JSGeneratorObject::JSGeneratorObjectVerify() {
+ // In an expression like "new g()", there can be a point where a generator
+ // object is allocated but its fields are all undefined, as it hasn't yet been
+ // initialized by the generator. Hence these weak checks.
+ VerifyObjectField(kFunctionOffset);
+ VerifyObjectField(kContextOffset);
+ VerifyObjectField(kOperandStackOffset);
+ VerifyObjectField(kContinuationOffset);
+}
+
+
void JSModule::JSModuleVerify() {
VerifyObjectField(kContextOffset);
VerifyObjectField(kScopeInfoOffset);
@@ -724,6 +741,28 @@ void JSArrayBuffer::JSArrayBufferVerify() {
}
+void JSTypedArray::JSTypedArrayVerify() {
+ CHECK(IsJSTypedArray());
+ JSObjectVerify();
+ VerifyPointer(buffer());
+ CHECK(buffer()->IsJSArrayBuffer() || buffer()->IsUndefined());
+
+ VerifyPointer(byte_offset());
+ CHECK(byte_offset()->IsSmi() || byte_offset()->IsHeapNumber()
+ || byte_offset()->IsUndefined());
+
+ VerifyPointer(byte_length());
+ CHECK(byte_length()->IsSmi() || byte_length()->IsHeapNumber()
+ || byte_length()->IsUndefined());
+
+ VerifyPointer(length());
+ CHECK(length()->IsSmi() || length()->IsHeapNumber()
+ || length()->IsUndefined());
+
+ VerifyPointer(elements());
+}
+
+
void Foreign::ForeignVerify() {
CHECK(IsForeign());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index ec03405fd..efc1764dd 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -653,6 +653,7 @@ TYPE_CHECKER(Code, CODE_TYPE)
TYPE_CHECKER(Oddball, ODDBALL_TYPE)
TYPE_CHECKER(JSGlobalPropertyCell, JS_GLOBAL_PROPERTY_CELL_TYPE)
TYPE_CHECKER(SharedFunctionInfo, SHARED_FUNCTION_INFO_TYPE)
+TYPE_CHECKER(JSGeneratorObject, JS_GENERATOR_OBJECT_TYPE)
TYPE_CHECKER(JSModule, JS_MODULE_TYPE)
TYPE_CHECKER(JSValue, JS_VALUE_TYPE)
TYPE_CHECKER(JSDate, JS_DATE_TYPE)
@@ -675,6 +676,7 @@ bool Object::IsBoolean() {
TYPE_CHECKER(JSArray, JS_ARRAY_TYPE)
TYPE_CHECKER(JSArrayBuffer, JS_ARRAY_BUFFER_TYPE)
+TYPE_CHECKER(JSTypedArray, JS_TYPED_ARRAY_TYPE)
TYPE_CHECKER(JSRegExp, JS_REGEXP_TYPE)
@@ -1488,22 +1490,59 @@ MaybeObject* JSObject::AddFastPropertyUsingMap(Map* map) {
}
-bool JSObject::TryTransitionToField(Handle<JSObject> object,
- Handle<Name> key) {
- if (!object->map()->HasTransitionArray()) return false;
- Handle<Map> target;
- {
- AssertNoAllocation no_allocation;
- TransitionArray* transitions = object->map()->transitions();
- int transition = transitions->Search(*key);
- if (transition == TransitionArray::kNotFound) return false;
- PropertyDetails target_details = transitions->GetTargetDetails(transition);
- if (target_details.type() != FIELD) return false;
- if (target_details.attributes() != NONE) return false;
- target = Handle<Map>(transitions->GetTarget(transition));
+MaybeObject* JSObject::TransitionToMap(Map* map) {
+ ASSERT(this->map()->inobject_properties() == map->inobject_properties());
+ ElementsKind expected_kind = this->map()->elements_kind();
+ if (map->elements_kind() != expected_kind) {
+ MaybeObject* maybe_map = map->AsElementsKind(expected_kind);
+ if (!maybe_map->To(&map)) return maybe_map;
}
- JSObject::AddFastPropertyUsingMap(object, target);
- return true;
+ int total_size =
+ map->NumberOfOwnDescriptors() + map->unused_property_fields();
+ int out_of_object = total_size - map->inobject_properties();
+ if (out_of_object != properties()->length()) {
+ FixedArray* new_properties;
+ MaybeObject* maybe_properties = properties()->CopySize(out_of_object);
+ if (!maybe_properties->To(&new_properties)) return maybe_properties;
+ set_properties(new_properties);
+ }
+ set_map(map);
+ return this;
+}
+
+
+Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
+ AssertNoAllocation no_gc;
+ if (!map->HasTransitionArray()) return Handle<String>::null();
+ TransitionArray* transitions = map->transitions();
+ if (!transitions->IsSimpleTransition()) return Handle<String>::null();
+ int transition = TransitionArray::kSimpleTransitionIndex;
+ PropertyDetails details = transitions->GetTargetDetails(transition);
+ Name* name = transitions->GetKey(transition);
+ if (details.type() != FIELD) return Handle<String>::null();
+ if (details.attributes() != NONE) return Handle<String>::null();
+ if (!name->IsString()) return Handle<String>::null();
+ return Handle<String>(String::cast(name));
+}
+
+
+Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
+ ASSERT(!ExpectedTransitionKey(map).is_null());
+ return Handle<Map>(map->transitions()->GetTarget(
+ TransitionArray::kSimpleTransitionIndex));
+}
+
+
+Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
+ AssertNoAllocation no_allocation;
+ if (!map->HasTransitionArray()) return Handle<Map>::null();
+ TransitionArray* transitions = map->transitions();
+ int transition = transitions->Search(*key);
+ if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
+ PropertyDetails target_details = transitions->GetTargetDetails(transition);
+ if (target_details.type() != FIELD) return Handle<Map>::null();
+ if (target_details.attributes() != NONE) return Handle<Map>::null();
+ return Handle<Map>(transitions->GetTarget(transition));
}
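FindTransitionToField replaces the bool-returning TryTransitionToField with a pure lookup: it returns the target map of a FIELD transition with NONE attributes for the given key, or a null handle. Applying the transition is now a separate step via TransitionToMap. A caller might combine the two roughly as follows (a sketch against the internal API, not the actual call site; the new field's value still has to be stored afterwards):

    // Sketch: follow a plain field transition for `key` if one exists.
    Handle<Map> target =
        JSObject::FindTransitionToField(Handle<Map>(object->map()), key);
    if (!target.is_null()) {
      JSObject::TransitionToMap(object, target);  // reshape to the target map
      // ... write the field value for `key` at its new index ...
    } else {
      // ... fall back to the generic property-store path ...
    }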
@@ -1547,6 +1586,8 @@ int JSObject::GetHeaderSize() {
// field operations considerably on average.
if (type == JS_OBJECT_TYPE) return JSObject::kHeaderSize;
switch (type) {
+ case JS_GENERATOR_OBJECT_TYPE:
+ return JSGeneratorObject::kSize;
case JS_MODULE_TYPE:
return JSModule::kSize;
case JS_GLOBAL_PROXY_TYPE:
@@ -1565,6 +1606,8 @@ int JSObject::GetHeaderSize() {
return JSArray::kSize;
case JS_ARRAY_BUFFER_TYPE:
return JSArrayBuffer::kSize;
+ case JS_TYPED_ARRAY_TYPE:
+ return JSTypedArray::kSize;
case JS_SET_TYPE:
return JSSet::kSize;
case JS_MAP_TYPE:
@@ -2452,6 +2495,7 @@ CAST_ACCESSOR(JSBuiltinsObject)
CAST_ACCESSOR(Code)
CAST_ACCESSOR(JSArray)
CAST_ACCESSOR(JSArrayBuffer)
+CAST_ACCESSOR(JSTypedArray)
CAST_ACCESSOR(JSRegExp)
CAST_ACCESSOR(JSProxy)
CAST_ACCESSOR(JSFunctionProxy)
@@ -2508,8 +2552,8 @@ void Name::set_hash_field(uint32_t value) {
bool Name::Equals(Name* other) {
if (other == this) return true;
- if (this->IsSymbol() || other->IsSymbol() ||
- (this->IsInternalizedString() && other->IsInternalizedString())) {
+ if ((this->IsInternalizedString() && other->IsInternalizedString()) ||
+ this->IsSymbol() || other->IsSymbol()) {
return false;
}
return String::cast(this)->SlowEquals(String::cast(other));
@@ -3762,33 +3806,33 @@ void Code::set_safepoint_table_offset(unsigned offset) {
}
-unsigned Code::stack_check_table_offset() {
+unsigned Code::back_edge_table_offset() {
ASSERT_EQ(FUNCTION, kind());
- return StackCheckTableOffsetField::decode(
+ return BackEdgeTableOffsetField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
-void Code::set_stack_check_table_offset(unsigned offset) {
+void Code::set_back_edge_table_offset(unsigned offset) {
ASSERT_EQ(FUNCTION, kind());
ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StackCheckTableOffsetField::update(previous, offset);
+ int updated = BackEdgeTableOffsetField::update(previous, offset);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
-bool Code::stack_check_patched_for_osr() {
+bool Code::back_edges_patched_for_osr() {
ASSERT_EQ(FUNCTION, kind());
- return StackCheckPatchedForOSRField::decode(
+ return BackEdgesPatchedForOSRField::decode(
READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
}
-void Code::set_stack_check_patched_for_osr(bool value) {
+void Code::set_back_edges_patched_for_osr(bool value) {
ASSERT_EQ(FUNCTION, kind());
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
- int updated = StackCheckPatchedForOSRField::update(previous, value);
+ int updated = BackEdgesPatchedForOSRField::update(previous, value);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
}
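The renamed accessors pack two values into the same 32-bit kKindSpecificFlags2 word through V8's BitField template: the table offset occupies bits 0..30 and the OSR flag sits in bit 31. A minimal stand-alone model of the encode/decode/update arithmetic (not V8's actual template, just the same bit manipulation):

    #include <cstdint>

    // Minimal stand-in for a BitField<T, shift, size>: a value of type T
    // occupies `size` bits starting at bit `shift` of a 32-bit word.
    template <class T, int shift, int size>
    struct BitFieldModel {
      static const uint32_t kMask = ((1u << size) - 1u) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> shift);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    // Same layout as the fields used above.
    typedef BitFieldModel<int, 0, 31>  BackEdgeTableOffsetField;
    typedef BitFieldModel<bool, 31, 1> BackEdgesPatchedForOSRField;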
@@ -4973,6 +5017,19 @@ void Foreign::set_foreign_address(Address value) {
}
+ACCESSORS(JSGeneratorObject, function, JSFunction, kFunctionOffset)
+ACCESSORS(JSGeneratorObject, context, Object, kContextOffset)
+SMI_ACCESSORS(JSGeneratorObject, continuation, kContinuationOffset)
+ACCESSORS(JSGeneratorObject, operand_stack, FixedArray, kOperandStackOffset)
+
+
+JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
+ ASSERT(obj->IsJSGeneratorObject());
+ ASSERT(HeapObject::cast(obj)->Size() == JSGeneratorObject::kSize);
+ return reinterpret_cast<JSGeneratorObject*>(obj);
+}
+
+
ACCESSORS(JSModule, context, Object, kContextOffset)
ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
@@ -5154,6 +5211,12 @@ void JSArrayBuffer::set_backing_store(void* value, WriteBarrierMode mode) {
ACCESSORS(JSArrayBuffer, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
+ACCESSORS(JSTypedArray, byte_offset, Object, kByteOffsetOffset)
+ACCESSORS(JSTypedArray, byte_length, Object, kByteLengthOffset)
+ACCESSORS(JSTypedArray, length, Object, kLengthOffset)
+
+
ACCESSORS(JSRegExp, data, Object, kDataOffset)
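Each ACCESSORS line above declares a tagged-pointer getter/setter pair over the named field offset. Schematically, the expansion for the buffer field has roughly this shape (a sketch of the pattern, abbreviated; the macro's exact text, including the cast of the read value, lives earlier in objects-inl.h):

    // Roughly what ACCESSORS(JSTypedArray, buffer, Object, kBufferOffset)
    // provides: a raw field read plus a write that records the store for GC.
    Object* JSTypedArray::buffer() {
      return READ_FIELD(this, kBufferOffset);
    }
    void JSTypedArray::set_buffer(Object* value, WriteBarrierMode mode) {
      WRITE_FIELD(this, kBufferOffset, value);
      CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kBufferOffset, value, mode);
    }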
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 834223261..aa71a961b 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -132,6 +132,7 @@ void HeapObject::HeapObjectPrint(FILE* out) {
case JS_OBJECT_TYPE: // fall through
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
case JS_ARRAY_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_REGEXP_TYPE:
JSObject::cast(this)->JSObjectPrint(out);
break;
@@ -186,6 +187,8 @@ void HeapObject::HeapObjectPrint(FILE* out) {
break;
case JS_ARRAY_BUFFER_TYPE:
JSArrayBuffer::cast(this)->JSArrayBufferPrint(out);
+ case JS_TYPED_ARRAY_TYPE:
+ JSTypedArray::cast(this)->JSTypedArrayPrint(out);
#define MAKE_STRUCT_CASE(NAME, Name, name) \
case NAME##_TYPE: \
Name::cast(this)->Name##Print(out); \
@@ -530,6 +533,7 @@ static const char* TypeToString(InstanceType type) {
case ODDBALL_TYPE: return "ODDBALL";
case JS_GLOBAL_PROPERTY_CELL_TYPE: return "JS_GLOBAL_PROPERTY_CELL";
case SHARED_FUNCTION_INFO_TYPE: return "SHARED_FUNCTION_INFO";
+ case JS_GENERATOR_OBJECT_TYPE: return "JS_GENERATOR_OBJECT";
case JS_MODULE_TYPE: return "JS_MODULE";
case JS_FUNCTION_TYPE: return "JS_FUNCTION";
case CODE_TYPE: return "CODE";
@@ -807,6 +811,22 @@ void JSArrayBuffer::JSArrayBufferPrint(FILE* out) {
}
+void JSTypedArray::JSTypedArrayPrint(FILE* out) {
+ HeapObject::PrintHeader(out, "JSTypedArray");
+ PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
+ PrintF(out, " - buffer =");
+ buffer()->ShortPrint(out);
+ PrintF(out, "\n - byte_offset = ");
+ byte_offset()->ShortPrint(out);
+ PrintF(out, "\n - byte_length = ");
+ byte_length()->ShortPrint(out);
+  PrintF(out, "\n - length = ");
+  length()->ShortPrint(out);
+  PrintF(out, "\n");

+ PrintElements(out);
+}
+
+
void JSFunction::JSFunctionPrint(FILE* out) {
HeapObject::PrintHeader(out, "Function");
PrintF(out, " - map = 0x%p\n", reinterpret_cast<void*>(map()));
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index fa53562bd..7b5c8bef7 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -136,6 +136,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
@@ -145,6 +146,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
case JS_BUILTINS_OBJECT_TYPE:
case JS_MESSAGE_OBJECT_TYPE:
case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
return GetVisitorIdForSize(kVisitJSObject,
kVisitJSObjectGeneric,
instance_size);
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 8f03b1043..33be173a5 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -1290,6 +1290,10 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
break;
}
+ case JS_GENERATOR_OBJECT_TYPE: {
+ accumulator->Add("<JS Generator>");
+ break;
+ }
case JS_MODULE_TYPE: {
accumulator->Add("<JS Module>");
break;
@@ -1546,11 +1550,13 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
break;
case JS_OBJECT_TYPE:
case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+ case JS_GENERATOR_OBJECT_TYPE:
case JS_MODULE_TYPE:
case JS_VALUE_TYPE:
case JS_DATE_TYPE:
case JS_ARRAY_TYPE:
case JS_ARRAY_BUFFER_TYPE:
+ case JS_TYPED_ARRAY_TYPE:
case JS_SET_TYPE:
case JS_MAP_TYPE:
case JS_WEAK_MAP_TYPE:
@@ -2602,13 +2608,18 @@ MaybeObject* JSObject::GetElementsTransitionMapSlow(ElementsKind to_kind) {
return start_map->CopyAsElementsKind(to_kind, OMIT_TRANSITION);
}
- Map* closest_map = FindClosestElementsTransition(start_map, to_kind);
+ return start_map->AsElementsKind(to_kind);
+}
+
- if (closest_map->elements_kind() == to_kind) {
+MaybeObject* Map::AsElementsKind(ElementsKind kind) {
+ Map* closest_map = FindClosestElementsTransition(this, kind);
+
+ if (closest_map->elements_kind() == kind) {
return closest_map;
}
- return AddMissingElementsTransitions(closest_map, to_kind);
+ return AddMissingElementsTransitions(closest_map, kind);
}
@@ -3073,6 +3084,13 @@ void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
}
+void JSObject::TransitionToMap(Handle<JSObject> object, Handle<Map> map) {
+ CALL_HEAP_FUNCTION_VOID(
+ object->GetIsolate(),
+ object->TransitionToMap(*map));
+}
+
+
MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
Name* name_raw,
Object* value_raw,
@@ -7991,7 +8009,7 @@ MaybeObject* String::SubString(int start, int end, PretenureFlag pretenure) {
void String::PrintOn(FILE* file) {
int length = this->length();
for (int i = 0; i < length; i++) {
- fprintf(file, "%c", Get(i));
+ PrintF(file, "%c", Get(i));
}
}
@@ -8546,6 +8564,7 @@ bool SharedFunctionInfo::CanGenerateInlineConstructor(Object* prototype) {
// Check the basic conditions for generating inline constructor code.
if (!FLAG_inline_new
|| !has_only_simple_this_property_assignments()
+ || is_generator()
|| this_property_assignments_count() == 0) {
return false;
}
@@ -9097,13 +9116,6 @@ SafepointEntry Code::GetSafepointEntry(Address pc) {
}
-void Code::SetNoStackCheckTable() {
- // Indicate the absence of a stack-check table by a table start after the
- // end of the instructions. Table start must be aligned, so round up.
- set_stack_check_table_offset(RoundUp(instruction_size(), kIntSize));
-}
-
-
Map* Code::FindFirstMap() {
ASSERT(is_inline_cache_stub());
AssertNoAllocation no_allocation;
@@ -9622,18 +9634,20 @@ void Code::Disassemble(const char* name, FILE* out) {
}
PrintF(out, "\n");
} else if (kind() == FUNCTION) {
- unsigned offset = stack_check_table_offset();
- // If there is no stack check table, the "table start" will at or after
+ unsigned offset = back_edge_table_offset();
+ // If there is no back edge table, the "table start" will be at or after
// (due to alignment) the end of the instruction stream.
if (static_cast<int>(offset) < instruction_size()) {
- unsigned* address =
- reinterpret_cast<unsigned*>(instruction_start() + offset);
- unsigned length = address[0];
- PrintF(out, "Stack checks (size = %u)\n", length);
- PrintF(out, "ast_id pc_offset\n");
- for (unsigned i = 0; i < length; ++i) {
- unsigned index = (2 * i) + 1;
- PrintF(out, "%6u %9u\n", address[index], address[index + 1]);
+ Address back_edge_cursor = instruction_start() + offset;
+ uint32_t table_length = Memory::uint32_at(back_edge_cursor);
+ PrintF(out, "Back edges (size = %u)\n", table_length);
+ PrintF(out, "ast_id pc_offset loop_depth\n");
+ for (uint32_t i = 0; i < table_length; ++i) {
+ uint32_t ast_id = Memory::uint32_at(back_edge_cursor);
+ uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
+ uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
+ PrintF(out, "%6u %9u %10u\n", ast_id, pc_offset, loop_depth);
+ back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
PrintF(out, "\n");
}
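The loop above decodes one back-edge record per iteration: a 32-bit AST id, a 32-bit pc offset and an 8-bit loop depth, advancing by FullCodeGenerator::kBackEdgeEntrySize each time (the table itself starts with a 32-bit entry count). A packed-struct view of a single record, purely for illustration; the real table is read through raw Memory:: accessors:

    #include <cstdint>

    // Illustrative packed layout of one back-edge table record as decoded in
    // Code::Disassemble above.
    #pragma pack(push, 1)
    struct BackEdgeRecord {
      uint32_t ast_id;      // read at offset 0
      uint32_t pc_offset;   // read at offset kIntSize
      uint8_t  loop_depth;  // read at offset 2 * kIntSize
    };
    #pragma pack(pop)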
@@ -11545,9 +11559,8 @@ MaybeObject* JSObject::GetPropertyWithInterceptor(
}
-bool JSObject::HasRealNamedProperty(Name* key) {
+bool JSObject::HasRealNamedProperty(Isolate* isolate, Name* key) {
// Check access rights if needed.
- Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded()) {
if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
@@ -11561,73 +11574,21 @@ bool JSObject::HasRealNamedProperty(Name* key) {
}
-bool JSObject::HasRealElementProperty(uint32_t index) {
+bool JSObject::HasRealElementProperty(Isolate* isolate, uint32_t index) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- Heap* heap = GetHeap();
- if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
return false;
}
}
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) return true;
-
- switch (GetElementsKind()) {
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedArray::cast(elements())->length());
- return (index < length) &&
- !FixedArray::cast(elements())->get(index)->IsTheHole();
- }
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- uint32_t length = IsJSArray() ?
- static_cast<uint32_t>(
- Smi::cast(JSArray::cast(this)->length())->value()) :
- static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
- return (index < length) &&
- !FixedDoubleArray::cast(elements())->is_the_hole(index);
- break;
- }
- case EXTERNAL_PIXEL_ELEMENTS: {
- ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
- return index < static_cast<uint32_t>(pixels->length());
- }
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS: {
- ExternalArray* array = ExternalArray::cast(elements());
- return index < static_cast<uint32_t>(array->length());
- }
- case DICTIONARY_ELEMENTS: {
- return element_dictionary()->FindEntry(index)
- != SeededNumberDictionary::kNotFound;
- }
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNIMPLEMENTED();
- break;
- }
- // All possibilities have been handled above already.
- UNREACHABLE();
- return GetHeap()->null_value();
+ return GetElementAttributeWithoutInterceptor(this, index, false) != ABSENT;
}
-bool JSObject::HasRealNamedCallbackProperty(Name* key) {
+bool JSObject::HasRealNamedCallbackProperty(Isolate* isolate, Name* key) {
// Check access rights if needed.
- Isolate* isolate = GetIsolate();
if (IsAccessCheckNeeded()) {
if (!isolate->MayNamedAccess(this, key, v8::ACCESS_HAS)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
@@ -14128,7 +14089,7 @@ Handle<DeclaredAccessorDescriptor> DeclaredAccessorDescriptor::Create(
if (previous_length != 0) {
uint8_t* previous_array =
previous->serialized_data()->GetDataStartAddress();
- memcpy(array, previous_array, previous_length);
+ OS::MemCopy(array, previous_array, previous_length);
array += previous_length;
}
ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 37be25f88..898b17621 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -57,11 +57,13 @@
// - JSObject
// - JSArray
// - JSArrayBuffer
+// - JSTypedArray
// - JSSet
// - JSMap
// - JSWeakMap
// - JSRegExp
// - JSFunction
+// - JSGeneratorObject
// - JSModule
// - GlobalObject
// - JSGlobalObject
@@ -395,12 +397,14 @@ const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
V(JS_DATE_TYPE) \
V(JS_OBJECT_TYPE) \
V(JS_CONTEXT_EXTENSION_OBJECT_TYPE) \
+ V(JS_GENERATOR_OBJECT_TYPE) \
V(JS_MODULE_TYPE) \
V(JS_GLOBAL_OBJECT_TYPE) \
V(JS_BUILTINS_OBJECT_TYPE) \
V(JS_GLOBAL_PROXY_TYPE) \
V(JS_ARRAY_TYPE) \
V(JS_ARRAY_BUFFER_TYPE) \
+ V(JS_TYPED_ARRAY_TYPE) \
V(JS_PROXY_TYPE) \
V(JS_WEAK_MAP_TYPE) \
V(JS_REGEXP_TYPE) \
@@ -726,12 +730,14 @@ enum InstanceType {
JS_DATE_TYPE,
JS_OBJECT_TYPE,
JS_CONTEXT_EXTENSION_OBJECT_TYPE,
+ JS_GENERATOR_OBJECT_TYPE,
JS_MODULE_TYPE,
JS_GLOBAL_OBJECT_TYPE,
JS_BUILTINS_OBJECT_TYPE,
JS_GLOBAL_PROXY_TYPE,
JS_ARRAY_TYPE,
JS_ARRAY_BUFFER_TYPE,
+ JS_TYPED_ARRAY_TYPE,
JS_SET_TYPE,
JS_MAP_TYPE,
JS_WEAK_MAP_TYPE,
@@ -953,13 +959,14 @@ class MaybeObject BASE_EMBEDDED {
V(JSReceiver) \
V(JSObject) \
V(JSContextExtensionObject) \
+ V(JSGeneratorObject) \
V(JSModule) \
V(Map) \
V(DescriptorArray) \
V(TransitionArray) \
V(DeoptimizationInputData) \
V(DeoptimizationOutputData) \
- V(DependentCode) \
+ V(DependentCode) \
V(TypeFeedbackCells) \
V(FixedArray) \
V(FixedDoubleArray) \
@@ -978,6 +985,7 @@ class MaybeObject BASE_EMBEDDED {
V(Boolean) \
V(JSArray) \
V(JSArrayBuffer) \
+ V(JSTypedArray) \
V(JSProxy) \
V(JSFunctionProxy) \
V(JSSet) \
@@ -1495,6 +1503,8 @@ class HeapNumber: public HeapObject {
static const int kExponentBits = 11;
static const int kExponentBias = 1023;
static const int kExponentShift = 20;
+ static const int kInfinityOrNanExponent =
+ (kExponentMask >> kExponentShift) - kExponentBias;
static const int kMantissaBitsInTopWord = 20;
static const int kNonMantissaBitsInTopWord = 12;
@@ -1768,10 +1778,13 @@ class JSObject: public JSReceiver {
Handle<Object> value,
PropertyAttributes attributes);
+ static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
+ static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
+
// Try to follow an existing transition to a field with attributes NONE. The
// return value indicates whether the transition was successful.
- static inline bool TryTransitionToField(Handle<JSObject> object,
- Handle<Name> key);
+ static inline Handle<Map> FindTransitionToField(Handle<Map> map,
+ Handle<Name> key);
inline int LastAddedFieldIndex();
@@ -1779,6 +1792,8 @@ class JSObject: public JSReceiver {
// passed map. This also extends the property backing store if necessary.
static void AddFastPropertyUsingMap(Handle<JSObject> object, Handle<Map> map);
inline MUST_USE_RESULT MaybeObject* AddFastPropertyUsingMap(Map* map);
+ static void TransitionToMap(Handle<JSObject> object, Handle<Map> map);
+ inline MUST_USE_RESULT MaybeObject* TransitionToMap(Map* map);
// Can cause GC.
MUST_USE_RESULT MaybeObject* SetLocalPropertyIgnoreAttributes(
@@ -2034,9 +2049,9 @@ class JSObject: public JSReceiver {
inline bool HasIndexedInterceptor();
// Support functions for v8 api (needed for correct interceptor behavior).
- bool HasRealNamedProperty(Name* key);
- bool HasRealElementProperty(uint32_t index);
- bool HasRealNamedCallbackProperty(Name* key);
+ bool HasRealNamedProperty(Isolate* isolate, Name* key);
+ bool HasRealElementProperty(Isolate* isolate, uint32_t index);
+ bool HasRealNamedCallbackProperty(Isolate* isolate, Name* key);
// Get the header size for a JSObject. Used to compute the index of
// internal fields as well as the number of internal fields.
@@ -4501,13 +4516,13 @@ class Code: public HeapObject {
inline unsigned safepoint_table_offset();
inline void set_safepoint_table_offset(unsigned offset);
- // [stack_check_table_start]: For kind FUNCTION, the offset in the
- // instruction stream where the stack check table starts.
- inline unsigned stack_check_table_offset();
- inline void set_stack_check_table_offset(unsigned offset);
+ // [back_edge_table_start]: For kind FUNCTION, the offset in the
+ // instruction stream where the back edge table starts.
+ inline unsigned back_edge_table_offset();
+ inline void set_back_edge_table_offset(unsigned offset);
- inline bool stack_check_patched_for_osr();
- inline void set_stack_check_patched_for_osr(bool value);
+ inline bool back_edges_patched_for_osr();
+ inline void set_back_edges_patched_for_osr(bool value);
// [check type]: For kind CALL_IC, tells how to check if the
// receiver is valid for the given call.
@@ -4538,10 +4553,6 @@ class Code: public HeapObject {
// Get the safepoint entry for the given pc.
SafepointEntry GetSafepointEntry(Address pc);
- // Mark this code object as not having a stack check table. Assumes kind
- // is FUNCTION.
- void SetNoStackCheckTable();
-
// Find the first map in an IC stub.
Map* FindFirstMap();
void FindAllMaps(MapHandleList* maps);
@@ -4791,8 +4802,8 @@ class Code: public HeapObject {
kStubMajorKeyFirstBit, kStubMajorKeyBits> {}; // NOLINT
// KindSpecificFlags2 layout (FUNCTION)
- class StackCheckTableOffsetField: public BitField<int, 0, 31> {};
- class StackCheckPatchedForOSRField: public BitField<bool, 31, 1> {};
+ class BackEdgeTableOffsetField: public BitField<int, 0, 31> {};
+ class BackEdgesPatchedForOSRField: public BitField<bool, 31, 1> {};
// Signed field cannot be encoded using the BitField class.
static const int kArgumentsCountShift = 17;
@@ -5242,6 +5253,7 @@ class Map: public HeapObject {
Descriptor* descriptor,
int index,
TransitionFlag flag);
+ MUST_USE_RESULT MaybeObject* AsElementsKind(ElementsKind kind);
MUST_USE_RESULT MaybeObject* CopyAsElementsKind(ElementsKind kind,
TransitionFlag flag);
@@ -6251,6 +6263,40 @@ class SharedFunctionInfo: public HeapObject {
};
+class JSGeneratorObject: public JSObject {
+ public:
+ // [function]: The function corresponding to this generator object.
+ DECL_ACCESSORS(function, JSFunction)
+
+ // [context]: The context of the suspended computation, or undefined.
+ DECL_ACCESSORS(context, Object)
+
+ // [continuation]: Offset into code of continuation.
+ inline int continuation();
+ inline void set_continuation(int continuation);
+
+ // [operands]: Saved operand stack.
+ DECL_ACCESSORS(operand_stack, FixedArray)
+
+ // Casting.
+ static inline JSGeneratorObject* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSGeneratorObject)
+ DECLARE_VERIFIER(JSGeneratorObject)
+
+ // Layout description.
+ static const int kFunctionOffset = JSObject::kHeaderSize;
+ static const int kContextOffset = kFunctionOffset + kPointerSize;
+ static const int kContinuationOffset = kContextOffset + kPointerSize;
+ static const int kOperandStackOffset = kContinuationOffset + kPointerSize;
+ static const int kSize = kOperandStackOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSGeneratorObject);
+};
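The layout constants chain one pointer-sized slot after another, starting at the end of the ordinary JSObject header. As a worked example on a 32-bit build (kPointerSize == 4, and assuming the usual 12-byte JSObject header of map, properties and elements), the constants resolve as follows; the arithmetic is illustrative only, the real values come from V8's globals:

    const int kPointerSize        = 4;                                    // 32-bit build
    const int kJSObjectHeaderSize = 12;                                   // assumption
    const int kFunctionOffset     = kJSObjectHeaderSize;                  // 12
    const int kContextOffset      = kFunctionOffset + kPointerSize;       // 16
    const int kContinuationOffset = kContextOffset + kPointerSize;        // 20
    const int kOperandStackOffset = kContinuationOffset + kPointerSize;   // 24
    const int kSize               = kOperandStackOffset + kPointerSize;   // 28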
+
+
// Representation for module instance objects.
class JSModule: public JSObject {
public:
@@ -8517,6 +8563,38 @@ class JSArrayBuffer: public JSObject {
};
+class JSTypedArray: public JSObject {
+ public:
+ // [buffer]: ArrayBuffer that this typed array views.
+ DECL_ACCESSORS(buffer, Object)
+
+  // [byte_offset]: offset of typed array in bytes.
+ DECL_ACCESSORS(byte_offset, Object)
+
+ // [byte_length]: length of typed array in bytes.
+ DECL_ACCESSORS(byte_length, Object)
+
+ // [length]: length of typed array in elements.
+ DECL_ACCESSORS(length, Object)
+
+ // Casting.
+ static inline JSTypedArray* cast(Object* obj);
+
+ // Dispatched behavior.
+ DECLARE_PRINTER(JSTypedArray)
+ DECLARE_VERIFIER(JSTypedArray)
+
+ static const int kBufferOffset = JSObject::kHeaderSize;
+ static const int kByteOffsetOffset = kBufferOffset + kPointerSize;
+ static const int kByteLengthOffset = kByteOffsetOffset + kPointerSize;
+ static const int kLengthOffset = kByteLengthOffset + kPointerSize;
+ static const int kSize = kLengthOffset + kPointerSize;
+
+ private:
+ DISALLOW_IMPLICIT_CONSTRUCTORS(JSTypedArray);
+};
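The four fields are related: byte_offset positions the view inside its buffer, byte_length is the view's extent in bytes, and length counts elements. Assuming untagged non-negative copies of the stored values (in the heap they are Smi or HeapNumber objects, and the element size depends on the external array type), the intended invariants can be written as a small check; this helper is hypothetical, not part of the patch:

    #include <cstdint>

    // Hypothetical consistency check over untagged copies of the fields.
    bool TypedArrayFieldsConsistent(uint64_t buffer_byte_length,
                                    uint64_t byte_offset,
                                    uint64_t byte_length,
                                    uint64_t length,
                                    uint64_t element_size) {
      return byte_offset + byte_length <= buffer_byte_length &&
             byte_length == length * element_size;
    }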
+
+
// Foreign describes objects pointing from JavaScript to C structures.
// Since they cannot contain references to JS HeapObjects they can be
// placed in old_data_space.
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index d1228d2f7..b4ab62382 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -486,14 +486,13 @@ class Parser::BlockState BASE_EMBEDDED {
Parser::FunctionState::FunctionState(Parser* parser,
Scope* scope,
- bool is_generator,
Isolate* isolate)
: next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
next_handler_index_(0),
expected_property_count_(0),
- is_generator_(is_generator),
only_simple_this_property_assignments_(false),
this_property_assignments_(isolate->factory()->empty_fixed_array()),
+ generator_object_variable_(NULL),
parser_(parser),
outer_function_state_(parser->current_function_state_),
outer_scope_(parser->top_scope_),
@@ -537,36 +536,32 @@ Parser::FunctionState::~FunctionState() {
// ----------------------------------------------------------------------------
// Implementation of Parser
-Parser::Parser(CompilationInfo* info,
- int parser_flags,
- v8::Extension* extension,
- ScriptDataImpl* pre_data)
+Parser::Parser(CompilationInfo* info)
: isolate_(info->isolate()),
- symbol_cache_(pre_data ? pre_data->symbol_count() : 0, info->zone()),
+ symbol_cache_(0, info->zone()),
script_(info->script()),
scanner_(isolate_->unicode_cache()),
reusable_preparser_(NULL),
top_scope_(NULL),
current_function_state_(NULL),
target_stack_(NULL),
- extension_(extension),
- pre_data_(pre_data),
+ extension_(info->extension()),
+ pre_parse_data_(NULL),
fni_(NULL),
- allow_natives_syntax_((parser_flags & kAllowNativesSyntax) != 0),
- allow_lazy_((parser_flags & kAllowLazy) != 0),
- allow_modules_((parser_flags & kAllowModules) != 0),
+ allow_natives_syntax_(false),
+ allow_lazy_(false),
+ allow_generators_(false),
stack_overflow_(false),
parenthesized_function_(false),
zone_(info->zone()),
info_(info) {
ASSERT(!script_.is_null());
isolate_->set_ast_node_id(0);
- if ((parser_flags & kLanguageModeMask) == EXTENDED_MODE) {
- scanner().SetHarmonyScoping(true);
- }
- if ((parser_flags & kAllowModules) != 0) {
- scanner().SetHarmonyModules(true);
- }
+ set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
+ set_allow_modules(!info->is_native() && FLAG_harmony_modules);
+ set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
+ set_allow_lazy(false); // Must be explicitly enabled.
+ set_allow_generators(FLAG_harmony_generators);
}
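The constructor now derives its defaults from the CompilationInfo and the global FLAG_* switches, and everything else is opted into through setters on the parser object. A sketch of how a caller enables lazy parsing under the new interface (the accessors are declared in parser.h further down; error handling elided):

    // Sketch: allow_lazy defaults to false and must be switched on explicitly.
    Parser parser(info);
    parser.set_allow_lazy(true);
    bool ok = parser.Parse();  // sets info->function() on success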
@@ -617,7 +612,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ZoneScope* zone_scope) {
ASSERT(top_scope_ == NULL);
ASSERT(target_stack_ == NULL);
- if (pre_data_ != NULL) pre_data_->Initialize();
+ if (pre_parse_data_ != NULL) pre_parse_data_->Initialize();
Handle<String> no_name = isolate()->factory()->empty_string();
@@ -638,15 +633,16 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
scope->set_end_position(source->length());
// Compute the parsing mode.
- Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL || scope->is_eval_scope()) {
+ Mode mode = (FLAG_lazy && allow_lazy()) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax() ||
+ extension_ != NULL ||
+ scope->is_eval_scope()) {
mode = PARSE_EAGERLY;
}
ParsingModeScope parsing_mode(this, mode);
- bool is_generator = false;
// Enters 'scope'.
- FunctionState function_state(this, scope, is_generator, isolate());
+ FunctionState function_state(this, scope, isolate());
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
@@ -760,8 +756,7 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
zone());
}
- bool is_generator = false; // Top scope is not a generator.
- FunctionState function_state(this, scope, is_generator, isolate());
+ FunctionState function_state(this, scope, isolate());
ASSERT(scope->language_mode() != STRICT_MODE || !info()->is_classic_mode());
ASSERT(scope->language_mode() != EXTENDED_MODE ||
info()->is_extended_mode());
@@ -801,8 +796,8 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source,
Handle<String> Parser::GetSymbol(bool* ok) {
int symbol_id = -1;
- if (pre_data() != NULL) {
- symbol_id = pre_data()->GetSymbolIdentifier();
+ if (pre_parse_data() != NULL) {
+ symbol_id = pre_parse_data()->GetSymbolIdentifier();
}
return LookupSymbol(symbol_id);
}
@@ -1092,7 +1087,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
- top_scope_->SetLanguageMode(FLAG_harmony_scoping
+ top_scope_->SetLanguageMode(allow_harmony_scoping()
? EXTENDED_MODE : STRICT_MODE);
// "use strict" is the only directive for now.
directive_prologue = false;
@@ -1910,7 +1905,7 @@ Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
// '{' FunctionBody '}'
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- bool is_generator = FLAG_harmony_generators && Check(Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
bool is_strict_reserved = false;
Handle<String> name = ParseIdentifierOrStrictReservedWord(
&is_strict_reserved, CHECK_OK);
@@ -3105,8 +3100,11 @@ Expression* Parser::ParseYieldExpression(bool* ok) {
int position = scanner().peek_location().beg_pos;
Expect(Token::YIELD, CHECK_OK);
bool is_yield_star = Check(Token::MUL);
+ Expression* generator_object = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
Expression* expression = ParseAssignmentExpression(false, CHECK_OK);
- return factory()->NewYield(expression, is_yield_star, position);
+ return factory()->NewYield(generator_object, expression, is_yield_star,
+ position);
}
@@ -3486,7 +3484,7 @@ Expression* Parser::ParseMemberWithNewPrefixesExpression(PositionStack* stack,
if (peek() == Token::FUNCTION) {
Expect(Token::FUNCTION, CHECK_OK);
int function_token_position = scanner().location().beg_pos;
- bool is_generator = FLAG_harmony_generators && Check(Token::MUL);
+ bool is_generator = allow_generators() && Check(Token::MUL);
Handle<String> name;
bool is_strict_reserved_name = false;
if (peek_any_identifier()) {
@@ -3702,7 +3700,7 @@ Expression* Parser::ParsePrimaryExpression(bool* ok) {
break;
case Token::MOD:
- if (allow_natives_syntax_ || extension_ != NULL) {
+ if (allow_natives_syntax() || extension_ != NULL) {
result = ParseV8Intrinsic(CHECK_OK);
break;
}
@@ -4391,9 +4389,25 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
: FunctionLiteral::kNotGenerator;
AstProperties ast_properties;
// Parse function body.
- { FunctionState function_state(this, scope, is_generator, isolate());
+ { FunctionState function_state(this, scope, isolate());
top_scope_->SetScopeName(function_name);
+ if (is_generator) {
+ // For generators, allocating variables in contexts is currently a win
+ // because it minimizes the work needed to suspend and resume an
+ // activation.
+ top_scope_->ForceContextAllocation();
+
+ // Calling a generator returns a generator object. That object is stored
+ // in a temporary variable, a definition that is used by "yield"
+ // expressions. Presence of a variable for the generator object in the
+ // FunctionState indicates that this function is a generator.
+ Handle<String> tempname = isolate()->factory()->InternalizeOneByteString(
+ STATIC_ASCII_VECTOR(".generator_object"));
+ Variable* temp = top_scope_->DeclarationScope()->NewTemporary(tempname);
+ function_state.set_generator_object_variable(temp);
+ }
+
// FormalParameterList ::
// '(' (Identifier)*[','] ')'
Expect(Token::LPAREN, CHECK_OK);
@@ -4475,11 +4489,11 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
if (is_lazily_compiled) {
int function_block_pos = scanner().location().beg_pos;
FunctionEntry entry;
- if (pre_data_ != NULL) {
- // If we have pre_data_, we use it to skip parsing the function body.
- // the preparser data contains the information we need to construct the
- // lazy function.
- entry = pre_data()->GetFunctionEntry(function_block_pos);
+ if (pre_parse_data_ != NULL) {
+ // If we have pre_parse_data_, we use it to skip parsing the function
+ // body. The preparser data contains the information we need to
+ // construct the lazy function.
+ entry = pre_parse_data()->GetFunctionEntry(function_block_pos);
if (entry.is_valid()) {
if (entry.end_pos() <= function_block_pos) {
// End position greater than end of stream is safe, and hard
@@ -4550,6 +4564,26 @@ FunctionLiteral* Parser::ParseFunctionLiteral(Handle<String> function_name,
RelocInfo::kNoPosition)),
zone());
}
+
+ // For generators, allocate and yield an iterator on function entry.
+ if (is_generator) {
+ ZoneList<Expression*>* arguments =
+ new(zone()) ZoneList<Expression*>(0, zone());
+ CallRuntime* allocation = factory()->NewCallRuntime(
+ isolate()->factory()->empty_string(),
+ Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject),
+ arguments);
+ VariableProxy* init_proxy = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Assignment* assignment = factory()->NewAssignment(
+ Token::INIT_VAR, init_proxy, allocation, RelocInfo::kNoPosition);
+ VariableProxy* get_proxy = factory()->NewVariableProxy(
+ current_function_state_->generator_object_variable());
+ Yield* yield = factory()->NewYield(
+ get_proxy, assignment, false, RelocInfo::kNoPosition);
+ body->Add(factory()->NewExpressionStatement(yield), zone());
+ }
+
ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
materialized_literal_count = function_state.materialized_literal_count();
@@ -4646,14 +4680,14 @@ preparser::PreParser::PreParseResult Parser::LazyParseFunctionLiteral(
if (reusable_preparser_ == NULL) {
intptr_t stack_limit = isolate()->stack_guard()->real_climit();
- bool do_allow_lazy = true;
reusable_preparser_ = new preparser::PreParser(&scanner_,
NULL,
- stack_limit,
- do_allow_lazy,
- allow_natives_syntax_,
- allow_modules_,
- FLAG_harmony_generators);
+ stack_limit);
+ reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
+ reusable_preparser_->set_allow_modules(allow_modules());
+ reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
+ reusable_preparser_->set_allow_lazy(true);
+ reusable_preparser_->set_allow_generators(allow_generators());
}
preparser::PreParser::PreParseResult result =
reusable_preparser_->PreParseLazyFunction(top_scope_->language_mode(),
@@ -5900,20 +5934,18 @@ int ScriptDataImpl::ReadNumber(byte** source) {
// Create a Scanner for the preparser to use as input, and preparse the source.
-static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
- int flags,
- ParserRecorder* recorder) {
+ScriptDataImpl* PreParserApi::PreParse(Utf16CharacterStream* source) {
+ CompleteParserRecorder recorder;
Isolate* isolate = Isolate::Current();
HistogramTimerScope timer(isolate->counters()->pre_parse());
Scanner scanner(isolate->unicode_cache());
- scanner.SetHarmonyScoping(FLAG_harmony_scoping);
- scanner.Initialize(source);
intptr_t stack_limit = isolate->stack_guard()->real_climit();
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- recorder,
- flags,
- stack_limit);
+ preparser::PreParser preparser(&scanner, &recorder, stack_limit);
+ preparser.set_allow_lazy(true);
+ preparser.set_allow_generators(FLAG_harmony_generators);
+ preparser.set_allow_harmony_scoping(FLAG_harmony_scoping);
+ scanner.Initialize(source);
+ preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
if (result == preparser::PreParser::kPreParseStackOverflow) {
isolate->StackOverflow();
return NULL;
@@ -5921,26 +5953,11 @@ static ScriptDataImpl* DoPreParse(Utf16CharacterStream* source,
// Extract the accumulated data from the recorder as a single
// contiguous vector that we are responsible for disposing.
- Vector<unsigned> store = recorder->ExtractData();
+ Vector<unsigned> store = recorder.ExtractData();
return new ScriptDataImpl(store);
}
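PreParserApi::PreParse is now the single entry point for producing full preparse data; the old flag plumbing is gone and the recorder is created internally. A usage sketch, assuming a Utf16CharacterStream over the source is already available:

    // Sketch: preparse a script and hand the result to the compiler later.
    ScriptDataImpl* pre_parse_data = PreParserApi::PreParse(&stream);
    if (pre_parse_data != NULL && !pre_parse_data->has_error()) {
      // e.g. store it on the CompilationInfo so Parser::Parse() can use it
    }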
-ScriptDataImpl* ParserApi::PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags) {
- Handle<Script> no_script;
- if (FLAG_lazy && (extension == NULL)) {
- flags |= kAllowLazy;
- }
- if (FLAG_harmony_generators) {
- flags |= kAllowGenerators;
- }
- CompleteParserRecorder recorder;
- return DoPreParse(source, flags, &recorder);
-}
-
-
bool RegExpParser::ParseRegExp(FlatStringReader* input,
bool multiline,
RegExpCompileData* result,
@@ -5964,48 +5981,35 @@ bool RegExpParser::ParseRegExp(FlatStringReader* input,
}
-bool ParserApi::Parse(CompilationInfo* info, int parsing_flags) {
- ASSERT(info->function() == NULL);
+bool Parser::Parse() {
+ ASSERT(info()->function() == NULL);
FunctionLiteral* result = NULL;
- ASSERT((parsing_flags & kLanguageModeMask) == CLASSIC_MODE);
- if (!info->is_native() && FLAG_harmony_scoping) {
- // Harmony scoping is requested.
- parsing_flags |= EXTENDED_MODE;
- }
- if (!info->is_native() && FLAG_harmony_modules) {
- parsing_flags |= kAllowModules;
- }
- if (FLAG_allow_natives_syntax || info->is_native()) {
- // We require %identifier(..) syntax.
- parsing_flags |= kAllowNativesSyntax;
- }
- if (info->is_lazy()) {
- ASSERT(!info->is_eval());
- Parser parser(info, parsing_flags, NULL, NULL);
- if (info->shared_info()->is_function()) {
- result = parser.ParseLazy();
+ if (info()->is_lazy()) {
+ ASSERT(!info()->is_eval());
+ if (info()->shared_info()->is_function()) {
+ result = ParseLazy();
} else {
- result = parser.ParseProgram();
+ result = ParseProgram();
}
} else {
- ScriptDataImpl* pre_data = info->pre_parse_data();
- Parser parser(info, parsing_flags, info->extension(), pre_data);
- if (pre_data != NULL && pre_data->has_error()) {
- Scanner::Location loc = pre_data->MessageLocation();
- const char* message = pre_data->BuildMessage();
- Vector<const char*> args = pre_data->BuildArgs();
- parser.ReportMessageAt(loc, message, args);
+ ScriptDataImpl* pre_parse_data = info()->pre_parse_data();
+ set_pre_parse_data(pre_parse_data);
+ if (pre_parse_data != NULL && pre_parse_data->has_error()) {
+ Scanner::Location loc = pre_parse_data->MessageLocation();
+ const char* message = pre_parse_data->BuildMessage();
+ Vector<const char*> args = pre_parse_data->BuildArgs();
+ ReportMessageAt(loc, message, args);
DeleteArray(message);
for (int i = 0; i < args.length(); i++) {
DeleteArray(args[i]);
}
DeleteArray(args.start());
- ASSERT(info->isolate()->has_pending_exception());
+ ASSERT(info()->isolate()->has_pending_exception());
} else {
- result = parser.ParseProgram();
+ result = ParseProgram();
}
}
- info->SetFunction(result);
+ info()->SetFunction(result);
return (result != NULL);
}
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index fc4aba2b9..acf47bbcd 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -163,19 +163,17 @@ class ScriptDataImpl : public ScriptData {
};
-class ParserApi {
+class PreParserApi {
public:
- // Parses the source code represented by the compilation info and sets its
- // function literal. Returns false (and deallocates any allocated AST
- // nodes) if parsing failed.
- static bool Parse(CompilationInfo* info, int flags);
-
- // Generic preparser generating full preparse data.
- static ScriptDataImpl* PreParse(Utf16CharacterStream* source,
- v8::Extension* extension,
- int flags);
+ // Pre-parse a character stream and return full preparse data.
+ //
+ // This interface is here instead of in preparser.h because it instantiates a
+ // preparser recorder object that is suited to the parser's purposes. Also,
+ // the preparser doesn't know about ScriptDataImpl.
+ static ScriptDataImpl* PreParse(Utf16CharacterStream* source);
};
+
// ----------------------------------------------------------------------------
// REGEXP PARSING
@@ -295,7 +293,7 @@ class RegExpBuilder: public ZoneObject {
};
-class RegExpParser {
+class RegExpParser BASE_EMBEDDED {
public:
RegExpParser(FlatStringReader* in,
Handle<String>* error,
@@ -425,20 +423,36 @@ class RegExpParser {
// Forward declaration.
class SingletonLogger;
-class Parser {
+class Parser BASE_EMBEDDED {
public:
- Parser(CompilationInfo* info,
- int parsing_flags, // Combination of ParsingFlags
- v8::Extension* extension,
- ScriptDataImpl* pre_data);
- virtual ~Parser() {
+ explicit Parser(CompilationInfo* info);
+ ~Parser() {
delete reusable_preparser_;
reusable_preparser_ = NULL;
}
+ bool allow_natives_syntax() const { return allow_natives_syntax_; }
+ bool allow_lazy() const { return allow_lazy_; }
+ bool allow_modules() { return scanner().HarmonyModules(); }
+ bool allow_harmony_scoping() { return scanner().HarmonyScoping(); }
+ bool allow_generators() const { return allow_generators_; }
+
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_modules(bool allow) { scanner().SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner().SetHarmonyScoping(allow);
+ }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+
+ // Parses the source code represented by the compilation info and sets its
+ // function literal. Returns false (and deallocates any allocated AST
+ // nodes) if parsing failed.
+ static bool Parse(CompilationInfo* info) { return Parser(info).Parse(); }
+ bool Parse();
+
// Returns NULL if parsing failed.
FunctionLiteral* ParseProgram();
- FunctionLiteral* ParseLazy();
void ReportMessageAt(Scanner::Location loc,
const char* message,
@@ -474,7 +488,6 @@ class Parser {
public:
FunctionState(Parser* parser,
Scope* scope,
- bool is_generator,
Isolate* isolate);
~FunctionState();
@@ -505,7 +518,17 @@ class Parser {
void AddProperty() { expected_property_count_++; }
int expected_property_count() { return expected_property_count_; }
- bool is_generator() const { return is_generator_; }
+ void set_generator_object_variable(Variable *variable) {
+ ASSERT(variable != NULL);
+ ASSERT(!is_generator());
+ generator_object_variable_ = variable;
+ }
+ Variable* generator_object_variable() const {
+ return generator_object_variable_;
+ }
+ bool is_generator() const {
+ return generator_object_variable_ != NULL;
+ }
AstNodeFactory<AstConstructionVisitor>* factory() { return &factory_; }
@@ -521,14 +544,16 @@ class Parser {
// Properties count estimation.
int expected_property_count_;
- // Indicates that this function is a generator.
- bool is_generator_;
-
// Keeps track of assignments to properties of this. Used for
// optimizing constructors.
bool only_simple_this_property_assignments_;
Handle<FixedArray> this_property_assignments_;
+ // For generators, the variable that holds the generator object. This
+ // variable is used by yield expressions and return statements. NULL
+ // indicates that this function is not a generator.
+ Variable* generator_object_variable_;
+
Parser* parser_;
FunctionState* outer_function_state_;
Scope* outer_scope_;
@@ -552,6 +577,7 @@ class Parser {
Mode old_mode_;
};
+ FunctionLiteral* ParseLazy();
FunctionLiteral* ParseLazy(Utf16CharacterStream* source,
ZoneScope* zone_scope);
@@ -570,10 +596,15 @@ class Parser {
void ReportMessage(const char* message, Vector<const char*> args);
void ReportMessage(const char* message, Vector<Handle<String> > args);
+ void set_pre_parse_data(ScriptDataImpl *data) {
+ pre_parse_data_ = data;
+ symbol_cache_.Initialize(data ? data->symbol_count() : 0, zone());
+ }
+
bool inside_with() const { return top_scope_->inside_with(); }
Scanner& scanner() { return scanner_; }
Mode mode() const { return mode_; }
- ScriptDataImpl* pre_data() const { return pre_data_; }
+ ScriptDataImpl* pre_parse_data() const { return pre_parse_data_; }
bool is_extended_mode() {
ASSERT(top_scope_ != NULL);
return top_scope_->is_extended_mode();
@@ -835,13 +866,13 @@ class Parser {
FunctionState* current_function_state_;
Target* target_stack_; // for break, continue statements
v8::Extension* extension_;
- ScriptDataImpl* pre_data_;
+ ScriptDataImpl* pre_parse_data_;
FuncNameInferrer* fni_;
Mode mode_;
bool allow_natives_syntax_;
bool allow_lazy_;
- bool allow_modules_;
+ bool allow_generators_;
bool stack_overflow_;
// If true, the next (and immediately following) function literal is
// preceded by a parenthesis.
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 67c389e79..6804af8b9 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -43,6 +43,7 @@
#include "platform-posix.h"
#include "platform.h"
+#include "simulator.h"
#include "v8threads.h"
#include "vm-state-inl.h"
#include "win32-headers.h"
@@ -193,6 +194,11 @@ void OS::DebugBreak() {
}
+void OS::DumpBacktrace() {
+ // Currently unsupported.
+}
+
+
class PosixMemoryMappedFile : public OS::MemoryMappedFile {
public:
PosixMemoryMappedFile(FILE* file, void* memory, int size)
@@ -318,38 +324,127 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
// This causes VirtualMemory::Commit to not always commit the memory region
// specified.
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
+static void* GetRandomAddr() {
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ // Note that the current isolate isn't set up in a call path via
+ // CpuFeatures::Probe. We don't care about randomization in this case because
+ // the code page is immediately freed.
+ if (isolate != NULL) {
+ // The address range used to randomize RWX allocations in OS::Allocate
+ // Try not to map pages into the default range that windows loads DLLs
+ // Use a multiple of 64k to prevent committing unused memory.
+ // Note: This does not guarantee RWX regions will be within the
+ // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
+#ifdef V8_HOST_ARCH_64_BIT
+ static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+ static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+ static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+ uintptr_t address = (V8::RandomPrivate(isolate) << kPageSizeBits)
+ | kAllocationRandomAddressMin;
+ address &= kAllocationRandomAddressMax;
+ return reinterpret_cast<void *>(address);
+ }
+ return NULL;
+}
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+ LPVOID base = NULL;
+
+ if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages try to randomize the allocation address
+ for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+ base = VirtualAlloc(GetRandomAddr(), size, action, protection);
+ }
+ }
+
+ // After three attempts give up and let the OS find an address to use.
+ if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+ return base;
}
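GetRandomAddr scales a random value up by the page-size shift, ORs in the minimum constant to steer the hint away from the range used for DLLs, and masks with the maximum, which also forces 64 KB alignment; RandomizedVirtualAlloc then gives the OS three chances to honour that hint before falling back to any address. The 32-bit arithmetic, modelled stand-alone (the page-size shift is left as a parameter since its value comes from V8's internals):

    #include <cstdint>

    // Stand-alone model of the 32-bit branch of GetRandomAddr() above.
    uintptr_t RandomizedHint(uintptr_t random_bits, int page_size_bits) {
      const uintptr_t kAllocationRandomAddressMin = 0x04000000;
      const uintptr_t kAllocationRandomAddressMax = 0x3FFF0000;
      uintptr_t address =
          (random_bits << page_size_bits) | kAllocationRandomAddressMin;
      return address & kAllocationRandomAddressMax;
    }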
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
- size_ = size;
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+ : address_(NULL), size_(0) {
+ ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
+ size_t request_size = RoundUp(size + alignment,
+ static_cast<intptr_t>(OS::AllocateAlignment()));
+ void* address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ Address base = RoundUp(static_cast<Address>(address), alignment);
+ // Try reducing the size by freeing and then reallocating a specific area.
+ bool result = ReleaseRegion(address, request_size);
+ USE(result);
+ ASSERT(result);
+ address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (address != NULL) {
+ request_size = size;
+ ASSERT(base == static_cast<Address>(address));
+ } else {
+ // Resizing failed, just go with a bigger area.
+ address = ReserveRegion(request_size);
+ if (address == NULL) return;
+ }
+ address_ = address;
+ size_ = request_size;
}
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- if (0 == VirtualFree(address(), 0, MEM_RELEASE)) address_ = NULL;
+ bool result = ReleaseRegion(address_, size_);
+ ASSERT(result);
+ USE(result);
}
}
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
- if (NULL == VirtualAlloc(address, size, MEM_COMMIT, prot)) {
- return false;
- }
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
+
+void VirtualMemory::Reset() {
+ address_ = NULL;
+ size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
}
bool VirtualMemory::Uncommit(void* address, size_t size) {
ASSERT(IsReserved());
- return VirtualFree(address, size, MEM_DECOMMIT) != false;
+ return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+ return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+ if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+ return false;
+ }
+
+ UpdateAllocatedSpaceLimits(base, static_cast<int>(size));
+ return true;
}
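With the region helpers in place, VirtualMemory on Cygwin follows the same reserve-first, commit-on-demand pattern as the other ports. A sketch of the intended usage (sizes are illustrative; error handling elided):

    // Sketch: reserve a 64 KB region, back the first 4 KB with RW pages,
    // then drop them again; the destructor releases the whole reservation.
    VirtualMemory vm(64 * 1024);
    if (vm.IsReserved()) {
      vm.Commit(vm.address(), 4096, false);   // false: not executable
      // ... use the committed page ...
      vm.Uncommit(vm.address(), 4096);
    }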
@@ -364,6 +459,16 @@ bool VirtualMemory::Guard(void* address) {
}
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
@@ -377,11 +482,10 @@ class Thread::PlatformData : public Malloced {
};
-
-
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -398,7 +502,7 @@ static void* ThreadEntry(void* arg) {
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -591,143 +695,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-// ----------------------------------------------------------------------------
-// Cygwin profiler support.
-//
-// On Cygwin we use the same sampler implementation as on win32.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- Isolate* isolate = sampler->isolate();
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = isolate->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -737,44 +704,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SamplerThread::SetUp();
}
void OS::TearDown() {
- SamplerThread::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
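Each Thread constructor touched in this patch now initializes start_semaphore_ to NULL, and the per-platform ThreadEntry trampolines call NotifyStartedAndRun() instead of Run(). The helper itself lives in the shared platform code rather than in these files; the sketch below is an assumed minimal shape of the pattern, built only from the interfaces visible here (Semaphore, OS::CreateSemaphore(), Run()), not the verbatim V8 implementation.

    // Assumed sketch; the real definition is in the shared platform code.
    class Thread {
     public:
      // Runs on the newly created thread (called from ThreadEntry).
      void NotifyStartedAndRun() {
        if (start_semaphore_ != NULL) start_semaphore_->Signal();
        Run();
      }

      // Runs on the creating thread when the caller must know the child started.
      void StartSynchronously() {
        start_semaphore_ = OS::CreateSemaphore(0);
        Start();                    // Platform-specific; ends up in ThreadEntry.
        start_semaphore_->Wait();   // Returns only after the child signalled.
        delete start_semaphore_;
        start_semaphore_ = NULL;
      }

     private:
      Semaphore* start_semaphore_;  // NULL except during a synchronous start.
    };

This is why every Thread constructor in the hunks below gains start_semaphore_(NULL): a plain asynchronous Start() must still leave the pointer in a well-defined state.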
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index 14f7171a3..735f07899 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -357,12 +357,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -500,7 +500,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -517,7 +518,7 @@ static void* ThreadEntry(void* arg) {
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -681,174 +682,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-static pthread_t GetThreadID() {
- pthread_t thread_id = pthread_self();
- return thread_id;
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
-#elif V8_HOST_ARCH_ARM
- sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
- sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
- sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Install a signal handler.
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
-
- // Start a thread that sends SIGPROF signal to VM threads.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
-
- // Restore the old signal handler.
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -858,44 +691,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SignalSender::SetUp();
}
void OS::TearDown() {
- SignalSender::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
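The one-argument VirtualMemory constructor is now a thin initializer-list wrapper over ReserveRegion(), so a failed reservation simply leaves address_ == NULL (and IsReserved() false) rather than a half-built object. A minimal POSIX-flavoured sketch of the reserve/release pair it relies on, assuming the kMmapFd/kMmapFdOffset constants defined just above and mmap flags along the lines the BSD and Linux ports use:

    #include <sys/mman.h>

    void* VirtualMemory::ReserveRegion(size_t size) {
      // PROT_NONE + MAP_NORESERVE: claim address space, commit nothing yet.
      void* result = mmap(OS::GetRandomMmapAddr(),
                          size,
                          PROT_NONE,
                          MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
                          kMmapFd,
                          kMmapFdOffset);
      return (result == MAP_FAILED) ? NULL : result;
    }

    bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
      return munmap(base, size) == 0;
    }

The exact flag set differs slightly from port to port; treat this as the shape of the code, not its letter.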
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 1f9cde151..38f7c6783 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -38,7 +38,7 @@
#include <sys/types.h>
#include <stdlib.h>
-#if defined(__GLIBC__)
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
#include <execinfo.h>
#include <cxxabi.h>
#endif
@@ -51,9 +51,6 @@
#include <sys/stat.h> // open
#include <fcntl.h> // open
#include <unistd.h> // sysconf
-#if defined(__GLIBC__) && !defined(__UCLIBC__)
-#include <execinfo.h> // backtrace, backtrace_symbols
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__)
#include <strings.h> // index
#include <errno.h>
#include <stdarg.h>
@@ -146,9 +143,6 @@ bool OS::ArmCpuHasFeature(CpuFeature feature) {
// facility is universally available on the ARM architectures,
// so it's up to individual OSes to provide such.
switch (feature) {
- case VFP2:
- search_string = "vfp";
- break;
case VFP3:
search_string = "vfpv3";
break;
@@ -272,8 +266,10 @@ bool OS::MipsCpuHasFeature(CpuFeature feature) {
FILE* f = NULL;
const char* what = search_string;
- if (NULL == (f = fopen(file_name, "r")))
+ if (NULL == (f = fopen(file_name, "r"))) {
+ OS::PrintError("Failed to open /proc/cpuinfo\n");
return false;
+ }
int k;
while (EOF != (k = fgetc(f))) {
@@ -428,6 +424,8 @@ void OS::DebugBreak() {
# endif
#elif defined(__mips__)
asm("break");
+#elif defined(__native_client__)
+ asm("hlt");
#else
asm("int $3");
#endif
@@ -435,7 +433,7 @@ void OS::DebugBreak() {
void OS::DumpBacktrace() {
-#if defined(__GLIBC__)
+#if defined(__GLIBC__) && !defined(__UCLIBC__)
void* trace[100];
int size = backtrace(trace, ARRAY_SIZE(trace));
char** symbols = backtrace_symbols(trace, size);
@@ -644,12 +642,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -787,7 +785,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -809,7 +808,7 @@ static void* ThreadEntry(void* arg) {
#endif
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -822,12 +821,16 @@ void Thread::set_name(const char* name) {
void Thread::Start() {
pthread_attr_t* attr_ptr = NULL;
+#if defined(__native_client__)
+ // use default stack size.
+#else
pthread_attr_t attr;
if (stack_size_ > 0) {
pthread_attr_init(&attr);
pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
attr_ptr = &attr;
}
+#endif
int result = pthread_create(&data_->thread_, attr_ptr, ThreadEntry, this);
CHECK_EQ(0, result);
ASSERT(data_->thread_ != kNoThread);
@@ -988,349 +991,17 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-// Not all versions of Android's C library provide ucontext_t.
-// Detect this and provide custom but compatible definitions. Note that these
-// follow the GLibc naming convention to access register values from
-// mcontext_t.
-//
-// See http://code.google.com/p/android/issues/detail?id=34784
-
-#if defined(__arm__)
-
-typedef struct sigcontext mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__mips__)
-// MIPS version of sigcontext, for Android bionic.
-typedef struct {
- uint32_t regmask;
- uint32_t status;
- uint64_t pc;
- uint64_t gregs[32];
- uint64_t fpregs[32];
- uint32_t acx;
- uint32_t fpc_csr;
- uint32_t fpc_eir;
- uint32_t used_math;
- uint32_t dsp;
- uint64_t mdhi;
- uint64_t mdlo;
- uint32_t hi1;
- uint32_t lo1;
- uint32_t hi2;
- uint32_t lo2;
- uint32_t hi3;
- uint32_t lo3;
-} mcontext_t;
-
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-
-#elif defined(__i386__)
-// x86 version for Android.
-typedef struct {
- uint32_t gregs[19];
- void* fpregs;
- uint32_t oldmask;
- uint32_t cr2;
-} mcontext_t;
-
-typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
-typedef struct ucontext {
- uint32_t uc_flags;
- struct ucontext* uc_link;
- stack_t uc_stack;
- mcontext_t uc_mcontext;
- // Other fields are not used by V8, don't define them here.
-} ucontext_t;
-enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
-#endif
-
-#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
-
-static int GetThreadID() {
-#if defined(__ANDROID__)
- // Android's C library provides gettid(2).
- return gettid();
-#else
- // Glibc doesn't provide a wrapper for gettid(2).
- return syscall(SYS_gettid);
-#endif
-}
-
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
-#elif V8_HOST_ARCH_ARM
-#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
- (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
- // Old GLibc ARM versions used a gregs[] array to access the register
- // values from mcontext_t.
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
-#else
- sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
- sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
- sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
-#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
- // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
-#elif V8_HOST_ARCH_MIPS
- sample->pc = reinterpret_cast<Address>(mcontext.pc);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
-#endif // V8_HOST_ARCH_*
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- int vm_tid() const { return vm_tid_; }
-
- private:
- const int vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(int tid) {
- if (!signal_handler_installed_) return;
- // Glibc doesn't provide a wrapper for tgkill(2).
-#if defined(ANDROID)
- syscall(__NR_tgkill, vm_tgid_, tid, SIGPROF);
-#else
- int result = syscall(SYS_tgkill, vm_tgid_, tid, SIGPROF);
- USE(result);
- ASSERT(result == 0);
-#endif
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
-#if defined(ANDROID)
- usleep(interval);
-#else
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif // DEBUG
- USE(result);
-#endif // ANDROID
- }
-
- const int vm_tgid_;
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
-
-#ifdef __arm__
- // When running on ARM hardware check that the EABI used by V8 and
- // by the C code is the same.
- bool hard_float = OS::ArmUsingHardFloat();
- if (hard_float) {
-#if !USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary compiled with -mfloat-abi=hard but without "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- } else {
-#if USE_EABI_HARDFLOAT
- PrintF("ERROR: Binary not compiled with -mfloat-abi=hard but with "
- "-DUSE_EABI_HARDFLOAT\n");
- exit(1);
-#endif
- }
-#endif
- SignalSender::SetUp();
}
void OS::TearDown() {
- SignalSender::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
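uClibc defines __GLIBC__ for compatibility but does not ship <execinfo.h>, which is why both the include and the backtrace users such as OS::DumpBacktrace() are now guarded with !defined(__UCLIBC__). A self-contained sketch of the guarded usage (the function name is illustrative, not the exact V8 routine):

    #if defined(__GLIBC__) && !defined(__UCLIBC__)
    #include <execinfo.h>  // backtrace, backtrace_symbols
    #include <stdio.h>
    #include <stdlib.h>

    static void DumpBacktraceSketch() {
      void* trace[100];
      int size = backtrace(trace, 100);
      char** symbols = backtrace_symbols(trace, size);
      fprintf(stderr, "\n==== C stack trace ====\n\n");
      if (symbols == NULL) {
        fprintf(stderr, "(no symbols)\n");
      } else {
        for (int i = 1; i < size; ++i) {  // Frame 0 is this function itself.
          fprintf(stderr, "%2d: %s\n", i, symbols[i]);
        }
      }
      free(symbols);  // backtrace_symbols() returns one malloc'd block.
    }
    #endif  // defined(__GLIBC__) && !defined(__UCLIBC__)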
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index 30e2b890b..9bb4dbdd3 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -60,6 +60,7 @@
#include "platform-posix.h"
#include "platform.h"
+#include "simulator.h"
#include "vm-state-inl.h"
// Manually define these here as weak imports, rather than including execinfo.h.
@@ -407,12 +408,33 @@ VirtualMemory::~VirtualMemory() {
}
+bool VirtualMemory::IsReserved() {
+ return address_ != NULL;
+}
+
+
void VirtualMemory::Reset() {
address_ = NULL;
size_ = 0;
}
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+ return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+ return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+ OS::Guard(address, OS::CommitPageSize());
+ return true;
+}
+
+
void* VirtualMemory::ReserveRegion(size_t size) {
void* result = mmap(OS::GetRandomMmapAddr(),
size,
@@ -427,22 +449,6 @@ void* VirtualMemory::ReserveRegion(size_t size) {
}
-bool VirtualMemory::IsReserved() {
- return address_ != NULL;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
- OS::Guard(address, OS::CommitPageSize());
- return true;
-}
-
-
bool VirtualMemory::CommitRegion(void* address,
size_t size,
bool is_executable) {
@@ -461,11 +467,6 @@ bool VirtualMemory::CommitRegion(void* address,
}
-bool VirtualMemory::Uncommit(void* address, size_t size) {
- return UncommitRegion(address, size);
-}
-
-
bool VirtualMemory::UncommitRegion(void* address, size_t size) {
return mmap(address,
size,
@@ -495,7 +496,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -530,7 +532,7 @@ static void* ThreadEntry(void* arg) {
thread->data()->thread_ = pthread_self();
SetThreadName(thread->name());
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -741,186 +743,17 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : profiled_thread_(mach_thread_self()) {}
-
- ~PlatformData() {
- // Deallocate Mach port for thread.
- mach_port_deallocate(mach_task_self(), profiled_thread_);
- }
-
- thread_act_t profiled_thread() { return profiled_thread_; }
-
- private:
- // Note: for profiled_thread_ Mach primitives are used instead of PThread's
- // because the latter doesn't provide thread manipulation primitives required.
- // For details, consult "Mac OS X Internals" book, Section 7.3.
- thread_act_t profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
- Isolate* isolate = sampler->isolate();
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
-
-#if V8_HOST_ARCH_X64
- thread_state_flavor_t flavor = x86_THREAD_STATE64;
- x86_thread_state64_t state;
- mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __r ## name
-#else
-#define REGISTER_FIELD(name) r ## name
-#endif // __DARWIN_UNIX03
-#elif V8_HOST_ARCH_IA32
- thread_state_flavor_t flavor = i386_THREAD_STATE;
- i386_thread_state_t state;
- mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
-#if __DARWIN_UNIX03
-#define REGISTER_FIELD(name) __e ## name
-#else
-#define REGISTER_FIELD(name) e ## name
-#endif // __DARWIN_UNIX03
-#else
-#error Unsupported Mac OS X host architecture.
-#endif // V8_HOST_ARCH
-
- if (thread_get_state(profiled_thread,
- flavor,
- reinterpret_cast<natural_t*>(&state),
- &count) == KERN_SUCCESS) {
- sample->state = isolate->current_vm_state();
- sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
- sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
- sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- thread_resume(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-#undef REGISTER_FIELD
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SamplerThread::SetUp();
}
void OS::TearDown() {
- SamplerThread::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
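On the POSIX ports, Commit(), Uncommit() and Guard() are now thin forwards to the static *Region helpers, with Guard() delegating to OS::Guard(address, OS::CommitPageSize()). A sketch of the primitives underneath, assuming the usual mmap/mprotect approach these ports take; the real CommitRegion also updates the allocated-space limits, which is omitted here:

    #include <sys/mman.h>

    // Make a page inaccessible so any read or write faults.
    void OS::Guard(void* address, const size_t size) {
      mprotect(address, size, PROT_NONE);
    }

    bool VirtualMemory::CommitRegion(void* address, size_t size,
                                     bool is_executable) {
      int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
      // MAP_FIXED re-maps the already reserved range with real backing store.
      return mmap(address, size, prot,
                  MAP_PRIVATE | MAP_ANON | MAP_FIXED,
                  kMmapFd, kMmapFdOffset) != MAP_FAILED;
    }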
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index 0fef0633e..1b481f4b3 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -317,6 +317,16 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
}
+VirtualMemory::VirtualMemory() {
+ UNIMPLEMENTED();
+}
+
+
+VirtualMemory::VirtualMemory(size_t size) {
+ UNIMPLEMENTED();
+}
+
+
VirtualMemory::VirtualMemory(size_t size, void* address_hint) {
UNIMPLEMENTED();
}
@@ -333,6 +343,11 @@ bool VirtualMemory::IsReserved() {
}
+void VirtualMemory::Reset() {
+ UNIMPLEMENTED();
+}
+
+
bool VirtualMemory::Commit(void* address, size_t size, bool executable) {
UNIMPLEMENTED();
return false;
@@ -351,6 +366,30 @@ bool VirtualMemory::Guard(void* address) {
}
+void* VirtualMemory::ReserveRegion(size_t size) {
+ UNIMPLEMENTED();
+ return NULL;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+ UNIMPLEMENTED();
+ return false;
+}
+
+
bool VirtualMemory::HasLazyCommits() {
// TODO(alph): implement for the platform.
return false;
@@ -369,7 +408,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size) {
+ stack_size_(options.stack_size),
+ start_semaphore_(NULL) {
set_name(options.name);
UNIMPLEMENTED();
}
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index e48d4cb35..e1aff1737 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -385,12 +385,12 @@ int OS::StackWalk(Vector<OS::StackFrame> frames) {
static const int kMmapFd = -1;
static const int kMmapFdOffset = 0;
+
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -529,7 +529,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -551,7 +552,7 @@ static void* ThreadEntry(void* arg) {
#endif
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -727,232 +728,17 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- sample->state = isolate->current_vm_state();
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
-#ifdef __NetBSD__
- mcontext_t& mcontext = ucontext->uc_mcontext;
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
- sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
- sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
-#endif // V8_HOST_ARCH
-#else // OpenBSD
-#if V8_HOST_ARCH_IA32
- sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
-#elif V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
- sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
- sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
-#endif // V8_HOST_ARCH
-#endif // __NetBSD__
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- vm_tgid_(getpid()),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int vm_tgid_;
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
void OS::SetUp() {
// Seed the random number generator. We preserve microsecond resolution.
uint64_t seed = Ticks() ^ (getpid() << 16);
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SignalSender::SetUp();
}
void OS::TearDown() {
- SignalSender::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index 69d39112a..48898ed9a 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -94,6 +94,12 @@ void OS::Guard(void* address, const size_t size) {
void* OS::GetRandomMmapAddr() {
+#if defined(__native_client__)
+ // TODO(bradchen): restore randomization once Native Client gets
+ // smarter about using mmap address hints.
+ // See http://code.google.com/p/nativeclient/issues/3341
+ return NULL;
+#endif
Isolate* isolate = Isolate::UncheckedCurrent();
// Note that the current isolate isn't set up in a call path via
// CpuFeatures::Probe. We don't care about randomization in this case because
@@ -109,26 +115,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- raw_addr &= 0x3ffff000;
-
-# ifdef __sun
- // For our Solaris/illumos mmap hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); Solaris and
- // illumos will try the hint and if that fails allocate as if there were
- // no hint at all. The high hint prevents the break from getting hemmed in
- // at low values, ceding half of the address space to the system heap.
- raw_addr += 0x80000000;
-# else
// The range 0x20000000 - 0x60000000 is relatively unpopulated across a
// variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
// 10.6 and 10.7.
+ raw_addr &= 0x3ffff000;
raw_addr += 0x20000000;
-# endif
#endif
return reinterpret_cast<void*>(raw_addr);
}
@@ -331,25 +322,32 @@ int OS::VSNPrintF(Vector<char> str,
#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
+
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
-// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+// Copy memory area. No restrictions.
+void OS::MemMove(void* dest, const void* src, size_t size) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
+
#endif // V8_TARGET_ARCH_IA32
void POSIXPostSetUp() {
#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
#endif
init_fast_sin_function();
init_fast_cos_function();
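The 32-bit branch of GetRandomMmapAddr() now uses the same hint arithmetic on every POSIX port, dropping the Solaris/illumos special case. Spelled out, the two operations keep the hint page-aligned and inside the range the comment cites:

    uint32_t raw_addr = V8::RandomPrivate(isolate);
    raw_addr &= 0x3ffff000;  // page-aligned, 0x00000000 .. 0x3ffff000 (below 1 GB)
    raw_addr += 0x20000000;  // shifted into 0x20000000 .. 0x5ffff000 (~0.5 GB .. ~1.5 GB)

The MemCopy-to-MemMove switch in the same file follows a bootstrap-safe pattern: the function pointer starts out as a wrapper around the C library's memmove, and POSIXPostSetUp() only swaps in the generated routine when CreateMemMoveFunction() actually produced one, so OS::MemMove is callable at any point during startup.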
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 0e616d1ab..aeacab9d5 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -327,10 +327,9 @@ static const int kMmapFdOffset = 0;
VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-VirtualMemory::VirtualMemory(size_t size) {
- address_ = ReserveRegion(size);
- size_ = size;
-}
+
+VirtualMemory::VirtualMemory(size_t size)
+ : address_(ReserveRegion(size)), size_(size) { }
VirtualMemory::VirtualMemory(size_t size, size_t alignment)
@@ -470,7 +469,8 @@ class Thread::PlatformData : public Malloced {
Thread::Thread(const Options& options)
: data_(new PlatformData()),
- stack_size_(options.stack_size()) {
+ stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
set_name(options.name());
}
@@ -487,7 +487,7 @@ static void* ThreadEntry(void* arg) {
// one) so we initialize it here too.
thread->data()->thread_ = pthread_self();
ASSERT(thread->data()->thread_ != kNoThread);
- thread->Run();
+ thread->NotifyStartedAndRun();
return NULL;
}
@@ -661,169 +661,6 @@ Semaphore* OS::CreateSemaphore(int count) {
}
-static pthread_t GetThreadID() {
- return pthread_self();
-}
-
-static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
- USE(info);
- if (signal != SIGPROF) return;
- Isolate* isolate = Isolate::UncheckedCurrent();
- if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
- // We require a fully initialized and entered isolate.
- return;
- }
- if (v8::Locker::IsActive() &&
- !isolate->thread_manager()->IsLockedByCurrentThread()) {
- return;
- }
-
- Sampler* sampler = isolate->logger()->sampler();
- if (sampler == NULL || !sampler->IsActive()) return;
-
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- // Extracting the sample from the context is extremely machine dependent.
- ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
- mcontext_t& mcontext = ucontext->uc_mcontext;
- sample->state = isolate->current_vm_state();
-
- sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
- sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
- sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
-
- sampler->SampleStack(sample);
- sampler->Tick(sample);
-}
-
-class Sampler::PlatformData : public Malloced {
- public:
- PlatformData() : vm_tid_(GetThreadID()) {}
-
- pthread_t vm_tid() const { return vm_tid_; }
-
- private:
- pthread_t vm_tid_;
-};
-
-
-class SignalSender : public Thread {
- public:
- static const int kSignalSenderStackSize = 64 * KB;
-
- explicit SignalSender(int interval)
- : Thread(Thread::Options("SignalSender", kSignalSenderStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void InstallSignalHandler() {
- struct sigaction sa;
- sa.sa_sigaction = ProfilerSignalHandler;
- sigemptyset(&sa.sa_mask);
- sa.sa_flags = SA_RESTART | SA_SIGINFO;
- signal_handler_installed_ =
- (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
- }
-
- static void RestoreSignalHandler() {
- if (signal_handler_installed_) {
- sigaction(SIGPROF, &old_signal_handler_, 0);
- signal_handler_installed_ = false;
- }
- }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- // Start a thread that will send SIGPROF signal to VM threads,
- // when CPU profiling will be enabled.
- instance_ = new SignalSender(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- RestoreSignalHandler();
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sender) {
- if (!sampler->IsProfiling()) return;
- SignalSender* sender = reinterpret_cast<SignalSender*>(raw_sender);
- sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
- }
-
- void SendProfilingSignal(pthread_t tid) {
- if (!signal_handler_installed_) return;
- pthread_kill(tid, SIGPROF);
- }
-
- void Sleep() {
- // Convert ms to us and subtract 100 us to compensate delays
- // occuring during signal delivery.
- useconds_t interval = interval_ * 1000 - 100;
- int result = usleep(interval);
-#ifdef DEBUG
- if (result != 0 && errno != EINTR) {
- fprintf(stderr,
- "SignalSender usleep error; interval = %u, errno = %d\n",
- interval,
- errno);
- ASSERT(result == 0 || errno == EINTR);
- }
-#endif
- USE(result);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SignalSender* instance_;
- static bool signal_handler_installed_;
- static struct sigaction old_signal_handler_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SignalSender);
-};
-
-Mutex* SignalSender::mutex_ = NULL;
-SignalSender* SignalSender::instance_ = NULL;
-struct sigaction SignalSender::old_signal_handler_;
-bool SignalSender::signal_handler_installed_ = false;
-
-
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -833,43 +670,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srandom(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SignalSender::SetUp();
}
void OS::TearDown() {
- SignalSender::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SignalSender::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SignalSender::RemoveActiveSampler(this);
- SetActive(false);
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index c1bae9352..272678fe6 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -45,6 +45,7 @@
#include "codegen.h"
#include "platform.h"
+#include "simulator.h"
#include "vm-state-inl.h"
#ifdef _MSC_VER
@@ -147,19 +148,23 @@ double ceiling(double x) {
static Mutex* limit_mutex = NULL;
#if defined(V8_TARGET_ARCH_IA32)
-static OS::MemCopyFunction memcopy_function = NULL;
+static void MemMoveWrapper(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+}
+
+// Initialize to library version so we can call this at any time during startup.
+static OS::MemMoveFunction memmove_function = &MemMoveWrapper;
+
// Defined in codegen-ia32.cc.
-OS::MemCopyFunction CreateMemCopyFunction();
+OS::MemMoveFunction CreateMemMoveFunction();
// Copy memory area to disjoint memory area.
-void OS::MemCopy(void* dest, const void* src, size_t size) {
+void OS::MemMove(void* dest, const void* src, size_t size) {
// Note: here we rely on dependent reads being ordered. This is true
// on all architectures we currently support.
- (*memcopy_function)(dest, src, size);
-#ifdef DEBUG
- CHECK_EQ(0, memcmp(dest, src, size));
-#endif
+ (*memmove_function)(dest, src, size);
}
+
#endif // V8_TARGET_ARCH_IA32
#ifdef _WIN64
@@ -575,7 +580,10 @@ void OS::PostSetUp() {
// CPU.
MathSetup();
#if defined(V8_TARGET_ARCH_IA32)
- memcopy_function = CreateMemCopyFunction();
+ OS::MemMoveFunction generated_memmove = CreateMemMoveFunction();
+ if (generated_memmove != NULL) {
+ memmove_function = generated_memmove;
+ }
#endif
}
@@ -1061,7 +1069,7 @@ OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
if (file_mapping == NULL) return NULL;
// Map a view of the file into memory
void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
- if (memory) memmove(memory, initial, size);
+ if (memory) OS::MemMove(memory, initial, size);
return new Win32MemoryMappedFile(file, file_mapping, memory, size);
}
@@ -1517,7 +1525,7 @@ VirtualMemory::VirtualMemory(size_t size, size_t alignment)
VirtualMemory::~VirtualMemory() {
if (IsReserved()) {
- bool result = ReleaseRegion(address_, size_);
+ bool result = ReleaseRegion(address(), size());
ASSERT(result);
USE(result);
}
@@ -1536,11 +1544,7 @@ void VirtualMemory::Reset() {
bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
- if (CommitRegion(address, size, is_executable)) {
- UpdateAllocatedSpaceLimits(address, static_cast<int>(size));
- return true;
- }
- return false;
+ return CommitRegion(address, size, is_executable);
}
@@ -1550,6 +1554,17 @@ bool VirtualMemory::Uncommit(void* address, size_t size) {
}
+bool VirtualMemory::Guard(void* address) {
+ if (NULL == VirtualAlloc(address,
+ OS::CommitPageSize(),
+ MEM_COMMIT,
+ PAGE_READONLY | PAGE_GUARD)) {
+ return false;
+ }
+ return true;
+}
+
+
void* VirtualMemory::ReserveRegion(size_t size) {
return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
}
@@ -1566,17 +1581,6 @@ bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
}
-bool VirtualMemory::Guard(void* address) {
- if (NULL == VirtualAlloc(address,
- OS::CommitPageSize(),
- MEM_COMMIT,
- PAGE_READONLY | PAGE_GUARD)) {
- return false;
- }
- return true;
-}
-
-
bool VirtualMemory::UncommitRegion(void* base, size_t size) {
return VirtualFree(base, size, MEM_DECOMMIT) != 0;
}
@@ -1605,7 +1609,7 @@ static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
// convention.
static unsigned int __stdcall ThreadEntry(void* arg) {
Thread* thread = reinterpret_cast<Thread*>(arg);
- thread->Run();
+ thread->NotifyStartedAndRun();
return 0;
}
@@ -1622,7 +1626,8 @@ class Thread::PlatformData : public Malloced {
// handle until it is started.
Thread::Thread(const Options& options)
- : stack_size_(options.stack_size()) {
+ : stack_size_(options.stack_size()),
+ start_semaphore_(NULL) {
data_ = new PlatformData(kNoThread);
set_name(options.name());
}
@@ -1970,141 +1975,6 @@ Socket* OS::CreateSocket() {
}
-// ----------------------------------------------------------------------------
-// Win32 profiler support.
-
-class Sampler::PlatformData : public Malloced {
- public:
- // Get a handle to the calling thread. This is the thread that we are
- // going to profile. We need to make a copy of the handle because we are
- // going to use it in the sampler thread. Using GetThreadHandle() will
- // not work in this case. We're using OpenThread because DuplicateHandle
- // for some reason doesn't work in Chrome's sandbox.
- PlatformData() : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
- THREAD_SUSPEND_RESUME |
- THREAD_QUERY_INFORMATION,
- false,
- GetCurrentThreadId())) {}
-
- ~PlatformData() {
- if (profiled_thread_ != NULL) {
- CloseHandle(profiled_thread_);
- profiled_thread_ = NULL;
- }
- }
-
- HANDLE profiled_thread() { return profiled_thread_; }
-
- private:
- HANDLE profiled_thread_;
-};
-
-
-class SamplerThread : public Thread {
- public:
- static const int kSamplerThreadStackSize = 64 * KB;
-
- explicit SamplerThread(int interval)
- : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
- interval_(interval) {}
-
- static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
- static void TearDown() { delete mutex_; }
-
- static void AddActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::AddActiveSampler(sampler);
- if (instance_ == NULL) {
- instance_ = new SamplerThread(sampler->interval());
- instance_->Start();
- } else {
- ASSERT(instance_->interval_ == sampler->interval());
- }
- }
-
- static void RemoveActiveSampler(Sampler* sampler) {
- ScopedLock lock(mutex_);
- SamplerRegistry::RemoveActiveSampler(sampler);
- if (SamplerRegistry::GetState() == SamplerRegistry::HAS_NO_SAMPLERS) {
- RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(instance_);
- delete instance_;
- instance_ = NULL;
- }
- }
-
- // Implement Thread::Run().
- virtual void Run() {
- SamplerRegistry::State state;
- while ((state = SamplerRegistry::GetState()) !=
- SamplerRegistry::HAS_NO_SAMPLERS) {
- // When CPU profiling is enabled both JavaScript and C++ code is
- // profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (RuntimeProfiler::WaitForSomeIsolateToEnterJS()) continue;
- }
- OS::Sleep(interval_);
- }
- }
-
- static void DoCpuProfile(Sampler* sampler, void* raw_sampler_thread) {
- if (!sampler->isolate()->IsInitialized()) return;
- if (!sampler->IsProfiling()) return;
- SamplerThread* sampler_thread =
- reinterpret_cast<SamplerThread*>(raw_sampler_thread);
- sampler_thread->SampleContext(sampler);
- }
-
- void SampleContext(Sampler* sampler) {
- HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
- if (profiled_thread == NULL) return;
-
- // Context used for sampling the register state of the profiled thread.
- CONTEXT context;
- memset(&context, 0, sizeof(context));
-
- Isolate* isolate = sampler->isolate();
- TickSample sample_obj;
- TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
- if (sample == NULL) sample = &sample_obj;
-
- static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
- if (SuspendThread(profiled_thread) == kSuspendFailed) return;
- sample->state = isolate->current_vm_state();
-
- context.ContextFlags = CONTEXT_FULL;
- if (GetThreadContext(profiled_thread, &context) != 0) {
-#if V8_HOST_ARCH_X64
- sample->pc = reinterpret_cast<Address>(context.Rip);
- sample->sp = reinterpret_cast<Address>(context.Rsp);
- sample->fp = reinterpret_cast<Address>(context.Rbp);
-#else
- sample->pc = reinterpret_cast<Address>(context.Eip);
- sample->sp = reinterpret_cast<Address>(context.Esp);
- sample->fp = reinterpret_cast<Address>(context.Ebp);
-#endif
- sampler->SampleStack(sample);
- sampler->Tick(sample);
- }
- ResumeThread(profiled_thread);
- }
-
- const int interval_;
-
- // Protects the process wide state below.
- static Mutex* mutex_;
- static SamplerThread* instance_;
-
- private:
- DISALLOW_COPY_AND_ASSIGN(SamplerThread);
-};
-
-
-Mutex* SamplerThread::mutex_ = NULL;
-SamplerThread* SamplerThread::instance_ = NULL;
-
-
void OS::SetUp() {
// Seed the random number generator.
// Convert the current time to a 64-bit integer first, before converting it
@@ -2114,44 +1984,12 @@ void OS::SetUp() {
uint64_t seed = static_cast<uint64_t>(TimeCurrentMillis());
srand(static_cast<unsigned int>(seed));
limit_mutex = CreateMutex();
- SamplerThread::SetUp();
}
void OS::TearDown() {
- SamplerThread::TearDown();
delete limit_mutex;
}
-Sampler::Sampler(Isolate* isolate, int interval)
- : isolate_(isolate),
- interval_(interval),
- profiling_(false),
- active_(false),
- samples_taken_(0) {
- data_ = new PlatformData;
-}
-
-
-Sampler::~Sampler() {
- ASSERT(!IsActive());
- delete data_;
-}
-
-
-void Sampler::Start() {
- ASSERT(!IsActive());
- SetActive(true);
- SamplerThread::AddActiveSampler(this);
-}
-
-
-void Sampler::Stop() {
- ASSERT(IsActive());
- SamplerThread::RemoveActiveSampler(this);
- SetActive(false);
-}
-
-
} } // namespace v8::internal
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index f2a228c92..ab75d74f4 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -91,8 +91,10 @@ inline int lrint(double flt) {
#endif // _MSC_VER
+#ifndef __CYGWIN__
// Random is missing on both Visual Studio and MinGW.
int random();
+#endif
#endif // WIN32
@@ -105,14 +107,6 @@ int random();
namespace v8 {
namespace internal {
-// Use AtomicWord for a machine-sized pointer. It is assumed that
-// reads and writes of naturally aligned values of this type are atomic.
-#if defined(__OpenBSD__) && defined(__i386__)
-typedef Atomic32 AtomicWord;
-#else
-typedef intptr_t AtomicWord;
-#endif
-
class Semaphore;
class Mutex;
@@ -337,17 +331,27 @@ class OS {
static void ReleaseStore(volatile AtomicWord* ptr, AtomicWord value);
#if defined(V8_TARGET_ARCH_IA32)
- // Copy memory area to disjoint memory area.
- static void MemCopy(void* dest, const void* src, size_t size);
// Limit below which the extra overhead of the MemCopy function is likely
// to outweigh the benefits of faster copying.
static const int kMinComplexMemCopy = 64;
- typedef void (*MemCopyFunction)(void* dest, const void* src, size_t size);
+ // Copy memory area. No restrictions.
+ static void MemMove(void* dest, const void* src, size_t size);
+ typedef void (*MemMoveFunction)(void* dest, const void* src, size_t size);
+
+ // Keep the distinction of "move" vs. "copy" for the benefit of other
+ // architectures.
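+ // Callers that may pass overlapping source and destination buffers should
+ // call MemMove directly; on IA32, MemCopy simply forwards to it.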
+ static void MemCopy(void* dest, const void* src, size_t size) {
+ MemMove(dest, src, size);
+ }
#else // V8_TARGET_ARCH_IA32
+ // Copy memory area to disjoint memory area.
static void MemCopy(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
}
+ static void MemMove(void* dest, const void* src, size_t size) {
+ memmove(dest, src, size);
+ }
static const int kMinComplexMemCopy = 16 * kPointerSize;
#endif // V8_TARGET_ARCH_IA32
@@ -453,6 +457,59 @@ class VirtualMemory {
// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore {
+ public:
+ virtual ~Semaphore() {}
+
+ // Suspends the calling thread until the semaphore counter is non-zero
+ // and then decrements the semaphore counter.
+ virtual void Wait() = 0;
+
+ // Suspends the calling thread until the counter is non-zero or the timeout
+ // time has passed. If timeout happens the return value is false and the
+ // counter is unchanged. Otherwise the semaphore counter is decremented and
+ // true is returned. The timeout value is specified in microseconds.
+ virtual bool Wait(int timeout) = 0;
+
+ // Increments the semaphore counter.
+ virtual void Signal() = 0;
+};
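+// Minimal usage sketch (illustrative only): a producer thread calls Signal()
+// after publishing an item and a consumer thread calls Wait() before
+// consuming it, so the consumer blocks while the counter is zero.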
+
+template <int InitialValue>
+struct CreateSemaphoreTrait {
+ static Semaphore* Create() {
+ return OS::CreateSemaphore(InitialValue);
+ }
+};
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+// // The following semaphore starts at 0.
+// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+// void my_function() {
+// // Do something with my_semaphore.Pointer().
+// }
+//
+template <int InitialValue>
+struct LazySemaphore {
+ typedef typename LazyDynamicInstance<
+ Semaphore, CreateSemaphoreTrait<InitialValue>,
+ ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+
+// ----------------------------------------------------------------------------
// Thread
//
// Thread objects are used for creating and running threads. When the start()
@@ -489,9 +546,18 @@ class Thread {
explicit Thread(const Options& options);
virtual ~Thread();
- // Start new thread by calling the Run() method in the new thread.
+ // Start new thread by calling the Run() method on the new thread.
void Start();
+ // Start new thread and wait until Run() method is called on the new thread.
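+ // The handshake uses start_semaphore_: the new thread's entry point calls
+ // NotifyStartedAndRun(), which signals the semaphore before invoking Run(),
+ // so StartSynchronously() returns only once the thread is actually running.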
+ void StartSynchronously() {
+ start_semaphore_ = OS::CreateSemaphore(0);
+ Start();
+ start_semaphore_->Wait();
+ delete start_semaphore_;
+ start_semaphore_ = NULL;
+ }
+
// Wait until thread terminates.
void Join();
@@ -541,6 +607,11 @@ class Thread {
class PlatformData;
PlatformData* data() { return data_; }
+ void NotifyStartedAndRun() {
+ if (start_semaphore_) start_semaphore_->Signal();
+ Run();
+ }
+
private:
void set_name(const char* name);
@@ -548,6 +619,7 @@ class Thread {
char name_[kMaxThreadNameLength];
int stack_size_;
+ Semaphore* start_semaphore_;
DISALLOW_COPY_AND_ASSIGN(Thread);
};
@@ -620,59 +692,6 @@ class ScopedLock {
// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore blocks until the
-// count becomes non-zero.
-
-class Semaphore {
- public:
- virtual ~Semaphore() {}
-
- // Suspends the calling thread until the semaphore counter is non zero
- // and then decrements the semaphore counter.
- virtual void Wait() = 0;
-
- // Suspends the calling thread until the counter is non zero or the timeout
- // time has passed. If timeout happens the return value is false and the
- // counter is unchanged. Otherwise the semaphore counter is decremented and
- // true is returned. The timeout value is specified in microseconds.
- virtual bool Wait(int timeout) = 0;
-
- // Increments the semaphore counter.
- virtual void Signal() = 0;
-};
-
-template <int InitialValue>
-struct CreateSemaphoreTrait {
- static Semaphore* Create() {
- return OS::CreateSemaphore(InitialValue);
- }
-};
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-// // The following semaphore starts at 0.
-// static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-// void my_function() {
-// // Do something with my_semaphore.Pointer().
-// }
-//
-template <int InitialValue>
-struct LazySemaphore {
- typedef typename LazyDynamicInstance<
- Semaphore, CreateSemaphoreTrait<InitialValue>,
- ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-
-// ----------------------------------------------------------------------------
// Socket
//
@@ -712,96 +731,6 @@ class Socket {
};
-// ----------------------------------------------------------------------------
-// Sampler
-//
-// A sampler periodically samples the state of the VM and optionally
-// (if used for profiling) the program counter and stack pointer for
-// the thread that created it.
-
-// TickSample captures the information collected for each sample.
-class TickSample {
- public:
- TickSample()
- : state(OTHER),
- pc(NULL),
- sp(NULL),
- fp(NULL),
- tos(NULL),
- frames_count(0),
- has_external_callback(false) {}
- StateTag state; // The state of the VM.
- Address pc; // Instruction pointer.
- Address sp; // Stack pointer.
- Address fp; // Frame pointer.
- union {
- Address tos; // Top stack value (*sp).
- Address external_callback;
- };
- static const int kMaxFramesCount = 64;
- Address stack[kMaxFramesCount]; // Call stack.
- int frames_count : 8; // Number of captured frames.
- bool has_external_callback : 1;
-};
-
-class Sampler {
- public:
- // Initialize sampler.
- Sampler(Isolate* isolate, int interval);
- virtual ~Sampler();
-
- int interval() const { return interval_; }
-
- // Performs stack sampling.
- void SampleStack(TickSample* sample) {
- DoSampleStack(sample);
- IncSamplesTaken();
- }
-
- // This method is called for each sampling period with the current
- // program counter.
- virtual void Tick(TickSample* sample) = 0;
-
- // Start and stop sampler.
- void Start();
- void Stop();
-
- // Is the sampler used for profiling?
- bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
- void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
- void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
-
- // Whether the sampler is running (that is, consumes resources).
- bool IsActive() const { return NoBarrier_Load(&active_); }
-
- Isolate* isolate() { return isolate_; }
-
- // Used in tests to make sure that stack sampling is performed.
- int samples_taken() const { return samples_taken_; }
- void ResetSamplesTaken() { samples_taken_ = 0; }
-
- class PlatformData;
- PlatformData* data() { return data_; }
-
- PlatformData* platform_data() { return data_; }
-
- protected:
- virtual void DoSampleStack(TickSample* sample) = 0;
-
- private:
- void SetActive(bool value) { NoBarrier_Store(&active_, value); }
- void IncSamplesTaken() { if (++samples_taken_ < 0) samples_taken_ = 0; }
-
- Isolate* isolate_;
- const int interval_;
- Atomic32 profiling_;
- Atomic32 active_;
- PlatformData* data_; // Platform specific data.
- int samples_taken_; // Counts stack samples taken.
- DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
-};
-
-
} } // namespace v8::internal
#endif // V8_PLATFORM_H_
diff --git a/deps/v8/src/preparse-data.cc b/deps/v8/src/preparse-data.cc
index d0425b4b2..287ad6698 100644
--- a/deps/v8/src/preparse-data.cc
+++ b/deps/v8/src/preparse-data.cc
@@ -95,7 +95,7 @@ Vector<unsigned> PartialParserRecorder::ExtractData() {
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = 0;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
@@ -151,7 +151,7 @@ Vector<unsigned> CompleteParserRecorder::ExtractData() {
Vector<unsigned> data = Vector<unsigned>::New(total_size);
preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
preamble_[PreparseDataConstants::kSymbolCountOffset] = symbol_id_;
- memcpy(data.start(), preamble_, sizeof(preamble_));
+ OS::MemCopy(data.start(), preamble_, sizeof(preamble_));
int symbol_start = PreparseDataConstants::kHeaderSize + function_size;
if (function_size > 0) {
function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
diff --git a/deps/v8/src/preparser-api.cc b/deps/v8/src/preparser-api.cc
index 6e8556aa1..462dfe229 100644
--- a/deps/v8/src/preparser-api.cc
+++ b/deps/v8/src/preparser-api.cc
@@ -100,9 +100,9 @@ class InputStreamUtf16Buffer : public Utf16CharacterStream {
// Hit the bottom of the allocated pushback buffer.
// Double the buffer and continue.
uc16* new_buffer = NewArray<uc16>(pushback_buffer_backing_size_ * 2);
- memcpy(new_buffer + pushback_buffer_backing_size_,
- pushback_buffer_backing_,
- pushback_buffer_backing_size_);
+ OS::MemCopy(new_buffer + pushback_buffer_backing_size_,
+ pushback_buffer_backing_,
+ pushback_buffer_backing_size_);
DeleteArray(pushback_buffer_backing_);
buffer_cursor_ = new_buffer + pushback_buffer_backing_size_;
pushback_buffer_backing_ = pushback_buffer_ = new_buffer;
@@ -168,16 +168,6 @@ class InputStreamUtf16Buffer : public Utf16CharacterStream {
unsigned pushback_buffer_backing_size_;
};
-
-// Functions declared by allocation.h and implemented in both api.cc (for v8)
-// or here (for a stand-alone preparser).
-
-void FatalProcessOutOfMemory(const char* reason) {
- V8_Fatal(__FILE__, __LINE__, reason);
-}
-
-bool EnableSlowAsserts() { return true; }
-
} // namespace internal.
@@ -191,11 +181,9 @@ PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
internal::Scanner scanner(&unicode_cache);
scanner.Initialize(&buffer);
internal::CompleteParserRecorder recorder;
- preparser::PreParser::PreParseResult result =
- preparser::PreParser::PreParseProgram(&scanner,
- &recorder,
- internal::kAllowLazy,
- stack_limit);
+ preparser::PreParser preparser(&scanner, &recorder, stack_limit);
+ preparser.set_allow_lazy(true);
+ preparser::PreParser::PreParseResult result = preparser.PreParseProgram();
if (result == preparser::PreParser::kPreParseStackOverflow) {
return PreParserData::StackOverflow();
}
@@ -206,9 +194,3 @@ PreParserData Preparse(UnicodeInputStream* input, size_t max_stack) {
}
} // namespace v8.
-
-
-// Used by ASSERT macros and other immediate exits.
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
- exit(EXIT_FAILURE);
-}
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index c61a08db9..6e8800e01 100644
--- a/deps/v8/src/preparser.cc
+++ b/deps/v8/src/preparser.cc
@@ -179,7 +179,7 @@ PreParser::SourceElements PreParser::ParseSourceElements(int end_token,
Statement statement = ParseSourceElement(CHECK_OK);
if (allow_directive_prologue) {
if (statement.IsUseStrictLiteral()) {
- set_language_mode(harmony_scoping_ ?
+ set_language_mode(allow_harmony_scoping() ?
i::EXTENDED_MODE : i::STRICT_MODE);
} else if (!statement.IsStringLiteral()) {
allow_directive_prologue = false;
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index ce9aa7546..e3a036f15 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -119,11 +119,7 @@ class PreParser {
PreParser(i::Scanner* scanner,
i::ParserRecorder* log,
- uintptr_t stack_limit,
- bool allow_lazy,
- bool allow_natives_syntax,
- bool allow_modules,
- bool allow_generators)
+ uintptr_t stack_limit)
: scanner_(scanner),
log_(log),
scope_(NULL),
@@ -131,30 +127,43 @@ class PreParser {
strict_mode_violation_location_(i::Scanner::Location::invalid()),
strict_mode_violation_type_(NULL),
stack_overflow_(false),
- allow_lazy_(allow_lazy),
- allow_modules_(allow_modules),
- allow_natives_syntax_(allow_natives_syntax),
- allow_generators_(allow_generators),
- parenthesized_function_(false),
- harmony_scoping_(scanner->HarmonyScoping()) { }
+ allow_lazy_(false),
+ allow_natives_syntax_(false),
+ allow_generators_(false),
+ parenthesized_function_(false) { }
~PreParser() {}
+ bool allow_natives_syntax() const { return allow_natives_syntax_; }
+ bool allow_lazy() const { return allow_lazy_; }
+ bool allow_modules() const { return scanner_->HarmonyModules(); }
+ bool allow_harmony_scoping() const { return scanner_->HarmonyScoping(); }
+ bool allow_generators() const { return allow_generators_; }
+
+ void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
+ void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
+ void set_allow_modules(bool allow) { scanner_->SetHarmonyModules(allow); }
+ void set_allow_harmony_scoping(bool allow) {
+ scanner_->SetHarmonyScoping(allow);
+ }
+ void set_allow_generators(bool allow) { allow_generators_ = allow; }
+
// Pre-parse the program from the character stream; returns true on
// success (even if parsing failed, the pre-parse data successfully
// captured the syntax error), and false if a stack-overflow happened
// during parsing.
- static PreParseResult PreParseProgram(i::Scanner* scanner,
- i::ParserRecorder* log,
- int flags,
- uintptr_t stack_limit) {
- bool allow_lazy = (flags & i::kAllowLazy) != 0;
- bool allow_natives_syntax = (flags & i::kAllowNativesSyntax) != 0;
- bool allow_modules = (flags & i::kAllowModules) != 0;
- bool allow_generators = (flags & i::kAllowGenerators) != 0;
- return PreParser(scanner, log, stack_limit, allow_lazy,
- allow_natives_syntax, allow_modules,
- allow_generators).PreParse();
+ PreParseResult PreParseProgram() {
+ Scope top_scope(&scope_, kTopLevelScope);
+ bool ok = true;
+ int start_position = scanner_->peek_location().beg_pos;
+ ParseSourceElements(i::Token::EOS, &ok);
+ if (stack_overflow_) return kPreParseStackOverflow;
+ if (!ok) {
+ ReportUnexpectedToken(scanner_->current_token());
+ } else if (!scope_->is_classic_mode()) {
+ CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
+ }
+ return kPreParseSuccess;
}
// Parses a single function literal, from the opening parentheses before
@@ -514,22 +523,6 @@ class PreParser {
bool is_generator_;
};
- // Preparse the program. Only called in PreParseProgram after creating
- // the instance.
- PreParseResult PreParse() {
- Scope top_scope(&scope_, kTopLevelScope);
- bool ok = true;
- int start_position = scanner_->peek_location().beg_pos;
- ParseSourceElements(i::Token::EOS, &ok);
- if (stack_overflow_) return kPreParseStackOverflow;
- if (!ok) {
- ReportUnexpectedToken(scanner_->current_token());
- } else if (!scope_->is_classic_mode()) {
- CheckOctalLiteral(start_position, scanner_->location().end_pos, &ok);
- }
- return kPreParseSuccess;
- }
-
// Report syntax error
void ReportUnexpectedToken(i::Token::Value token);
void ReportMessageAt(i::Scanner::Location location,
@@ -683,11 +676,9 @@ class PreParser {
const char* strict_mode_violation_type_;
bool stack_overflow_;
bool allow_lazy_;
- bool allow_modules_;
bool allow_natives_syntax_;
bool allow_generators_;
bool parenthesized_function_;
- bool harmony_scoping_;
};
} } // v8::preparser
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 50a71ced3..4660c0fde 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -505,7 +505,7 @@ void PrettyPrinter::Print(const char* format, ...) {
const int slack = 32;
int new_size = size_ + (size_ >> 1) + slack;
char* new_output = NewArray<char>(new_size);
- memcpy(new_output, output_, pos_);
+ OS::MemCopy(new_output, output_, pos_);
DeleteArray(output_);
output_ = new_output;
size_ = new_size;
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index ce07213b0..eacabeff4 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -894,20 +894,12 @@ void ProfileGenerator::RecordTickSample(const TickSample& sample) {
if (sample.pc != NULL) {
*entry++ = code_map_.FindEntry(sample.pc);
- if (sample.has_external_callback) {
+ if (sample.external_callback) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself.
*(entries.start()) = NULL;
*entry++ = code_map_.FindEntry(sample.external_callback);
- } else if (sample.tos != NULL) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- *entry = code_map_.FindEntry(sample.tos);
- if (*entry != NULL && !(*entry)->is_js_function()) {
- *entry = NULL;
- }
- entry++;
}
for (const Address* stack_pos = sample.stack,
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index b16de016a..528c47d80 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -27,9 +27,13 @@
"use strict";
-global.Proxy = new $Object();
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object;
-var $Proxy = global.Proxy
+var $Proxy = new $Object();
+
+// -------------------------------------------------------------------
function ProxyCreate(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
@@ -62,16 +66,26 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
handler, callTrap, constructTrap, $Function.prototype)
}
-%CheckIsBootstrapping()
-InstallFunctions($Proxy, DONT_ENUM, [
- "create", ProxyCreate,
- "createFunction", ProxyCreateFunction
-])
+
+// -------------------------------------------------------------------
+
+function SetUpProxy() {
+ %CheckIsBootstrapping()
+
+ global.Proxy = $Proxy;
+
+ // Set up non-enumerable properties of the Proxy object.
+ InstallFunctions($Proxy, DONT_ENUM, [
+ "create", ProxyCreate,
+ "createFunction", ProxyCreateFunction
+ ])
+}
+
+SetUpProxy();
-////////////////////////////////////////////////////////////////////////////////
-// Builtins
-////////////////////////////////////////////////////////////////////////////////
+// -------------------------------------------------------------------
+// Proxy Builtins
function DerivedConstructTrap(callTrap) {
return function() {
diff --git a/deps/v8/src/regexp-macro-assembler-irregexp.cc b/deps/v8/src/regexp-macro-assembler-irregexp.cc
index 16766cab0..e678d607a 100644
--- a/deps/v8/src/regexp-macro-assembler-irregexp.cc
+++ b/deps/v8/src/regexp-macro-assembler-irregexp.cc
@@ -479,7 +479,7 @@ int RegExpMacroAssemblerIrregexp::length() {
void RegExpMacroAssemblerIrregexp::Copy(Address a) {
- memcpy(a, buffer_.start(), length());
+ OS::MemCopy(a, buffer_.start(), length());
}
@@ -488,7 +488,7 @@ void RegExpMacroAssemblerIrregexp::Expand() {
Vector<byte> old_buffer = buffer_;
buffer_ = Vector<byte>::New(old_buffer.length() * 2);
own_buffer_ = true;
- memcpy(buffer_.start(), old_buffer.start(), old_buffer.length());
+ OS::MemCopy(buffer_.start(), old_buffer.start(), old_buffer.length());
if (old_buffer_was_our_own) {
old_buffer.Dispose();
}
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index 325a1496c..fc4114af5 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -57,9 +57,7 @@ RegExpStack::~RegExpStack() {
char* RegExpStack::ArchiveStack(char* to) {
size_t size = sizeof(thread_local_);
- memcpy(reinterpret_cast<void*>(to),
- &thread_local_,
- size);
+ OS::MemCopy(reinterpret_cast<void*>(to), &thread_local_, size);
thread_local_ = ThreadLocal();
return to + size;
}
@@ -67,7 +65,7 @@ char* RegExpStack::ArchiveStack(char* to) {
char* RegExpStack::RestoreStack(char* from) {
size_t size = sizeof(thread_local_);
- memcpy(&thread_local_, reinterpret_cast<void*>(from), size);
+ OS::MemCopy(&thread_local_, reinterpret_cast<void*>(from), size);
return from + size;
}
@@ -95,10 +93,11 @@ Address RegExpStack::EnsureCapacity(size_t size) {
Address new_memory = NewArray<byte>(static_cast<int>(size));
if (thread_local_.memory_size_ > 0) {
// Copy original memory into top of new memory.
- memcpy(reinterpret_cast<void*>(
- new_memory + size - thread_local_.memory_size_),
- reinterpret_cast<void*>(thread_local_.memory_),
- thread_local_.memory_size_);
+ OS::MemCopy(
+ reinterpret_cast<void*>(
+ new_memory + size - thread_local_.memory_size_),
+ reinterpret_cast<void*>(thread_local_.memory_),
+ thread_local_.memory_size_);
DeleteArray(thread_local_.memory_);
}
thread_local_.memory_ = new_memory;
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index 2349ca7fb..cb11ad107 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -25,11 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Expect $Object = global.Object;
-// Expect $Array = global.Array;
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Object = global.Object;
+// var $Array = global.Array;
var $RegExp = global.RegExp;
+// -------------------------------------------------------------------
+
// A recursive descent parser for Patterns according to the grammar of
// ECMA-262 15.10.1, with deviations noted below.
function DoConstructRegExp(object, pattern, flags) {
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 2606f8ab3..752d79c98 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -86,17 +86,6 @@ static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kBackEdgeDistanceUnit;
-Atomic32 RuntimeProfiler::state_ = 0;
-
-// TODO(isolates): Clean up the semaphore when it is no longer required.
-static LazySemaphore<0>::type semaphore = LAZY_SEMAPHORE_INITIALIZER;
-
-#ifdef DEBUG
-bool RuntimeProfiler::has_been_globally_set_up_ = false;
-#endif
-bool RuntimeProfiler::enabled_ = false;
-
-
RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
: isolate_(isolate),
sampler_threshold_(kSamplerThresholdInit),
@@ -110,15 +99,6 @@ RuntimeProfiler::RuntimeProfiler(Isolate* isolate)
}
-void RuntimeProfiler::GlobalSetUp() {
- ASSERT(!has_been_globally_set_up_);
- enabled_ = V8::UseCrankshaft() && FLAG_opt;
-#ifdef DEBUG
- has_been_globally_set_up_ = true;
-#endif
-}
-
-
static void GetICCounts(JSFunction* function,
int* ic_with_type_info_count,
int* ic_total_count,
@@ -190,23 +170,22 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// any back edge in any unoptimized frame will trigger on-stack
// replacement for that frame.
if (FLAG_trace_osr) {
- PrintF("[patching stack checks in ");
+ PrintF("[patching back edges in ");
function->PrintName();
PrintF(" for on-stack replacement]\n");
}
- // Get the stack check stub code object to match against. We aren't
+ // Get the interrupt stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
- Code* stack_check_code = NULL;
+ Code* interrupt_code = NULL;
InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
+ bool found_code = interrupt_stub.FindCodeInCache(&interrupt_code, isolate_);
if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
Code* unoptimized_code = shared->code();
- Deoptimizer::PatchStackCheckCode(unoptimized_code,
- stack_check_code,
- replacement_code);
+ Deoptimizer::PatchInterruptCode(
+ unoptimized_code, interrupt_code, replacement_code);
}
}
@@ -296,9 +275,11 @@ void RuntimeProfiler::OptimizeNow() {
function->IsMarkedForParallelRecompilation() ||
function->IsOptimized())) {
int nesting = shared_code->allow_osr_at_loop_nesting_level();
- if (nesting == 0) AttemptOnStackReplacement(function);
- int new_nesting = Min(nesting + 1, Code::kMaxLoopNestingMarker);
- shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ if (nesting < Code::kMaxLoopNestingMarker) {
+ int new_nesting = nesting + 1;
+ shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
+ AttemptOnStackReplacement(function);
+ }
}
// Only record top-level code on top of the execution stack and
@@ -385,13 +366,9 @@ void RuntimeProfiler::OptimizeNow() {
void RuntimeProfiler::SetUp() {
- ASSERT(has_been_globally_set_up_);
if (!FLAG_watch_ic_patching) {
ClearSampleBuffer();
}
- // If the ticker hasn't already started, make sure to do so to get
- // the ticks for the runtime profiler.
- if (IsEnabled()) isolate_->logger()->EnsureTickerStarted();
}
@@ -431,48 +408,6 @@ void RuntimeProfiler::UpdateSamplesAfterScavenge() {
}
-void RuntimeProfiler::HandleWakeUp(Isolate* isolate) {
- // The profiler thread must still be waiting.
- ASSERT(NoBarrier_Load(&state_) >= 0);
- // In IsolateEnteredJS we have already incremented the counter and
- // undid the decrement done by the profiler thread. Increment again
- // to get the right count of active isolates.
- NoBarrier_AtomicIncrement(&state_, 1);
- semaphore.Pointer()->Signal();
-}
-
-
-bool RuntimeProfiler::WaitForSomeIsolateToEnterJS() {
- Atomic32 old_state = NoBarrier_CompareAndSwap(&state_, 0, -1);
- ASSERT(old_state >= -1);
- if (old_state != 0) return false;
- semaphore.Pointer()->Wait();
- return true;
-}
-
-
-void RuntimeProfiler::StopRuntimeProfilerThreadBeforeShutdown(Thread* thread) {
- // Do a fake increment. If the profiler is waiting on the semaphore,
- // the returned state is 0, which can be left as an initial state in
- // case profiling is restarted later. If the profiler is not
- // waiting, the increment will prevent it from waiting, but has to
- // be undone after the profiler is stopped.
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- ASSERT(new_state >= 0);
- if (new_state == 0) {
- // The profiler thread is waiting. Wake it up. It must check for
- // stop conditions before attempting to wait again.
- semaphore.Pointer()->Signal();
- }
- thread->Join();
- // The profiler thread is now stopped. Undo the increment in case it
- // was not waiting.
- if (new_state != 0) {
- NoBarrier_AtomicIncrement(&state_, -1);
- }
-}
-
-
void RuntimeProfiler::RemoveDeadSamples() {
for (int i = 0; i < kSamplerWindowSize; i++) {
Object* function = sampler_window_[i];
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 62c48c7a4..1bf9aa878 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -43,13 +43,6 @@ class RuntimeProfiler {
public:
explicit RuntimeProfiler(Isolate* isolate);
- static void GlobalSetUp();
-
- static inline bool IsEnabled() {
- ASSERT(has_been_globally_set_up_);
- return enabled_;
- }
-
void OptimizeNow();
void SetUp();
@@ -63,26 +56,6 @@ class RuntimeProfiler {
// Rate limiting support.
- // VM thread interface.
- //
- // Called by isolates when their states change.
- static inline void IsolateEnteredJS(Isolate* isolate);
- static inline void IsolateExitedJS(Isolate* isolate);
-
- // Profiler thread interface.
- //
- // WaitForSomeIsolateToEnterJS():
- // When no isolates are running JavaScript code for some time the
- // profiler thread suspends itself by calling the wait function. The
- // wait function returns true after it waited or false immediately.
- // While the function was waiting the profiler may have been
- // disabled so it *must check* whether it is allowed to continue.
- static bool WaitForSomeIsolateToEnterJS();
-
- // Stops the runtime profiler thread when profiling support is being
- // turned off.
- static void StopRuntimeProfilerThreadBeforeShutdown(Thread* thread);
-
void UpdateSamplesAfterScavenge();
void RemoveDeadSamples();
void UpdateSamplesAfterCompact(ObjectVisitor* visitor);
@@ -92,8 +65,6 @@ class RuntimeProfiler {
private:
static const int kSamplerWindowSize = 16;
- static void HandleWakeUp(Isolate* isolate);
-
void Optimize(JSFunction* function, const char* reason);
void ClearSampleBuffer();
@@ -116,39 +87,8 @@ class RuntimeProfiler {
bool any_ic_changed_;
bool code_generated_;
-
- // Possible state values:
- // -1 => the profiler thread is waiting on the semaphore
- // 0 or positive => the number of isolates running JavaScript code.
- static Atomic32 state_;
-
-#ifdef DEBUG
- static bool has_been_globally_set_up_;
-#endif
- static bool enabled_;
};
-
-// Implementation of RuntimeProfiler inline functions.
-
-void RuntimeProfiler::IsolateEnteredJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, 1);
- if (new_state == 0) {
- // Just incremented from -1 to 0. -1 can only be set by the
- // profiler thread before it suspends itself and starts waiting on
- // the semaphore.
- HandleWakeUp(isolate);
- }
- ASSERT(new_state >= 0);
-}
-
-
-void RuntimeProfiler::IsolateExitedJS(Isolate* isolate) {
- Atomic32 new_state = NoBarrier_AtomicIncrement(&state_, -1);
- ASSERT(new_state >= 0);
- USE(new_state);
-}
-
} } // namespace v8::internal
#endif // V8_RUNTIME_PROFILER_H_
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 78efc8dbe..7e7d6d5d5 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -43,6 +43,7 @@
#include "deoptimizer.h"
#include "date.h"
#include "execution.h"
+#include "full-codegen.h"
#include "global-handles.h"
#include "isolate-inl.h"
#include "jsregexp.h"
@@ -60,6 +61,7 @@
#include "string-search.h"
#include "stub-cache.h"
#include "uri.h"
+#include "v8conversions.h"
#include "v8threads.h"
#include "vm-state-inl.h"
@@ -637,19 +639,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Fix) {
}
-static size_t ArrayBufferAllocatedLength(Isolate* isolate,
- JSArrayBuffer* buffer) {
- NoHandleAllocation hc(isolate);
- Object* byte_length = buffer->byte_length();
- if (byte_length->IsSmi()) {
- return Smi::cast(byte_length)->value();
- } else {
- double value = HeapNumber::cast(byte_length)->value();
- return static_cast<size_t>(value);
- }
-}
-
-
static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
Persistent<Value> object,
void* data) {
@@ -657,8 +646,8 @@ static void ArrayBufferWeakCallback(v8::Isolate* external_isolate,
HandleScope scope(isolate);
Handle<Object> internal_object = Utils::OpenHandle(*object);
- size_t allocated_length = ArrayBufferAllocatedLength(
- isolate, JSArrayBuffer::cast(*internal_object));
+ size_t allocated_length = NumberToSize(
+ isolate, JSArrayBuffer::cast(*internal_object)->byte_length());
isolate->heap()->AdjustAmountOfExternalAllocatedMemory(
-static_cast<intptr_t>(allocated_length));
if (data != NULL)
@@ -743,12 +732,12 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
CONVERT_DOUBLE_ARG_CHECKED(first, 2);
size_t start = static_cast<size_t>(first);
- size_t target_length = ArrayBufferAllocatedLength(isolate, *target);
+ size_t target_length = NumberToSize(isolate, target->byte_length());
if (target_length == 0)
return isolate->heap()->undefined_value();
- ASSERT(ArrayBufferAllocatedLength(isolate, *source) - target_length >= start);
+ ASSERT(NumberToSize(isolate, source->byte_length()) - target_length >= start);
uint8_t* source_data = reinterpret_cast<uint8_t*>(source->backing_store());
uint8_t* target_data = reinterpret_cast<uint8_t*>(target->backing_store());
CopyBytes(target_data, source_data + start, target_length);
@@ -756,6 +745,116 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayBufferSliceImpl) {
}
+enum TypedArrayId {
+ // arrayIds below should be synchronized with typedarray.js natives.
+ ARRAY_ID_UINT8 = 1,
+ ARRAY_ID_INT8 = 2,
+ ARRAY_ID_UINT16 = 3,
+ ARRAY_ID_INT16 = 4,
+ ARRAY_ID_UINT32 = 5,
+ ARRAY_ID_INT32 = 6,
+ ARRAY_ID_FLOAT32 = 7,
+ ARRAY_ID_FLOAT64 = 8
+};
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayInitialize) {
+ HandleScope scope(isolate);
+ ASSERT(args.length() == 5);
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
+ CONVERT_SMI_ARG_CHECKED(arrayId, 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 2);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_offset_object, 3);
+ CONVERT_ARG_HANDLE_CHECKED(Object, byte_length_object, 4);
+
+ ExternalArrayType arrayType;
+ ElementsKind elementsKind;
+ size_t elementSize;
+ switch (arrayId) {
+ case ARRAY_ID_UINT8:
+ elementsKind = EXTERNAL_UNSIGNED_BYTE_ELEMENTS;
+ arrayType = kExternalUnsignedByteArray;
+ elementSize = 1;
+ break;
+ case ARRAY_ID_INT8:
+ elementsKind = EXTERNAL_BYTE_ELEMENTS;
+ arrayType = kExternalByteArray;
+ elementSize = 1;
+ break;
+ case ARRAY_ID_UINT16:
+ elementsKind = EXTERNAL_UNSIGNED_SHORT_ELEMENTS;
+ arrayType = kExternalUnsignedShortArray;
+ elementSize = 2;
+ break;
+ case ARRAY_ID_INT16:
+ elementsKind = EXTERNAL_SHORT_ELEMENTS;
+ arrayType = kExternalShortArray;
+ elementSize = 2;
+ break;
+ case ARRAY_ID_UINT32:
+ elementsKind = EXTERNAL_UNSIGNED_INT_ELEMENTS;
+ arrayType = kExternalUnsignedIntArray;
+ elementSize = 4;
+ break;
+ case ARRAY_ID_INT32:
+ elementsKind = EXTERNAL_INT_ELEMENTS;
+ arrayType = kExternalIntArray;
+ elementSize = 4;
+ break;
+ case ARRAY_ID_FLOAT32:
+ elementsKind = EXTERNAL_FLOAT_ELEMENTS;
+ arrayType = kExternalFloatArray;
+ elementSize = 4;
+ break;
+ case ARRAY_ID_FLOAT64:
+ elementsKind = EXTERNAL_DOUBLE_ELEMENTS;
+ arrayType = kExternalDoubleArray;
+ elementSize = 8;
+ break;
+ default:
+ UNREACHABLE();
+ return NULL;
+ }
+
+ holder->set_buffer(*buffer);
+ holder->set_byte_offset(*byte_offset_object);
+ holder->set_byte_length(*byte_length_object);
+
+ size_t byte_offset = NumberToSize(isolate, *byte_offset_object);
+ size_t byte_length = NumberToSize(isolate, *byte_length_object);
+ ASSERT(byte_length % elementSize == 0);
+ size_t length = byte_length / elementSize;
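+ // For example, a Float64 view (elementSize 8) over a 32-byte range yields
+ // length 4.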
+
+ Handle<Object> length_obj =
+ isolate->factory()->NewNumber(static_cast<double>(length));
+ holder->set_length(*length_obj);
+ Handle<ExternalArray> elements =
+ isolate->factory()->NewExternalArray(
+ static_cast<int>(length), arrayType,
+ static_cast<uint8_t*>(buffer->backing_store()) + byte_offset);
+ Handle<Map> map =
+ isolate->factory()->GetElementsTransitionMap(holder, elementsKind);
+ holder->set_map(*map);
+ holder->set_elements(*elements);
+ return isolate->heap()->undefined_value();
+}
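+// Illustrative call from the typedarray.js natives (argument names are
+// hypothetical): %TypedArrayInitialize(obj, arrayId, buffer, byteOffset,
+// byteLength), matching the five-argument signature asserted above.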
+
+
+#define TYPED_ARRAY_GETTER(getter, accessor) \
+ RUNTIME_FUNCTION(MaybeObject*, Runtime_TypedArrayGet##getter) { \
+ HandleScope scope(isolate); \
+ ASSERT(args.length() == 1); \
+ CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0); \
+ return holder->accessor(); \
+ }
+
+TYPED_ARRAY_GETTER(Buffer, buffer)
+TYPED_ARRAY_GETTER(ByteLength, byte_length)
+TYPED_ARRAY_GETTER(ByteOffset, byte_offset)
+TYPED_ARRAY_GETTER(Length, length)
+
+#undef TYPED_ARRAY_GETTER
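+// The macro above expands into Runtime_TypedArrayGetBuffer, -GetByteLength,
+// -GetByteOffset and -GetLength, matching the entries added to runtime.h.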
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -956,8 +1055,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ClassOf) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
- Object* obj = input_obj;
+ CONVERT_ARG_CHECKED(Object, obj, 0);
// We don't expect access checks to be needed on JSProxy objects.
ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
do {
@@ -975,12 +1073,43 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetPrototype) {
}
+static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
+ Object* receiver) {
+ Object* current = receiver->GetPrototype(isolate);
+ while (current->IsJSObject() &&
+ JSObject::cast(current)->map()->is_hidden_prototype()) {
+ current = current->GetPrototype(isolate);
+ }
+ return current;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_SetPrototype) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, input_obj, 0);
+ CONVERT_ARG_CHECKED(JSObject, obj, 0);
CONVERT_ARG_CHECKED(Object, prototype, 1);
- return input_obj->SetPrototype(prototype, true);
+ if (FLAG_harmony_observation && obj->map()->is_observed()) {
+ HandleScope scope(isolate);
+ Handle<JSObject> receiver(obj);
+ Handle<Object> value(prototype, isolate);
+ Handle<Object> old_value(
+ GetPrototypeSkipHiddenPrototypes(isolate, *receiver), isolate);
+
+ MaybeObject* result = receiver->SetPrototype(*value, true);
+ Handle<Object> hresult;
+ if (!result->ToHandle(&hresult, isolate)) return result;
+
+ Handle<Object> new_value(
+ GetPrototypeSkipHiddenPrototypes(isolate, *receiver), isolate);
+ if (!new_value->SameValue(*old_value)) {
+ JSObject::EnqueueChangeRecord(receiver, "prototype",
+ isolate->factory()->proto_string(),
+ old_value);
+ }
+ return *hresult;
+ }
+ return obj->SetPrototype(prototype, true);
}
@@ -1918,6 +2047,24 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SpecialArrayFunctions) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_IsClassicModeFunction) {
+ NoHandleAllocation ha(isolate);
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
+ if (!callable->IsJSFunction()) {
+ HandleScope scope(isolate);
+ bool threw = false;
+ Handle<Object> delegate =
+ Execution::TryGetFunctionDelegate(Handle<JSReceiver>(callable), &threw);
+ if (threw) return Failure::Exception();
+ callable = JSFunction::cast(*delegate);
+ }
+ JSFunction* function = JSFunction::cast(callable);
+ SharedFunctionInfo* shared = function->shared();
+ return isolate->heap()->ToBoolean(shared->is_classic_mode());
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDefaultReceiver) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
@@ -2243,6 +2390,31 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetExpectedNumberOfProperties) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateJSGeneratorObject) {
+ NoHandleAllocation ha(isolate);
+ ASSERT(args.length() == 0);
+ JavaScriptFrameIterator it(isolate);
+ JavaScriptFrame* frame = it.frame();
+ JSFunction* function = JSFunction::cast(frame->function());
+ RUNTIME_ASSERT(function->shared()->is_generator());
+
+ JSGeneratorObject* generator;
+ if (frame->IsConstructor()) {
+ generator = JSGeneratorObject::cast(frame->receiver());
+ } else {
+ MaybeObject* maybe_generator =
+ isolate->heap()->AllocateJSGeneratorObject(function);
+ if (!maybe_generator->To(&generator)) return maybe_generator;
+ }
+ generator->set_function(function);
+ generator->set_context(isolate->heap()->undefined_value());
+ generator->set_continuation(0);
+ generator->set_operand_stack(isolate->heap()->empty_fixed_array());
+
+ return generator;
+}
+
+
MUST_USE_RESULT static MaybeObject* CharFromCode(Isolate* isolate,
Object* char_code) {
uint32_t code;
@@ -3898,6 +4070,33 @@ MaybeObject* Runtime::GetElementOrCharAt(Isolate* isolate,
}
+MaybeObject* Runtime::HasObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key) {
+ HandleScope scope(isolate);
+
+ // Check if the given key is an array index.
+ uint32_t index;
+ if (key->ToArrayIndex(&index)) {
+ return isolate->heap()->ToBoolean(object->HasElement(index));
+ }
+
+ // Convert the key to a name - possibly by calling back into JavaScript.
+ Handle<Name> name;
+ if (key->IsName()) {
+ name = Handle<Name>::cast(key);
+ } else {
+ bool has_pending_exception = false;
+ Handle<Object> converted =
+ Execution::ToString(key, &has_pending_exception);
+ if (has_pending_exception) return Failure::Exception();
+ name = Handle<Name>::cast(converted);
+ }
+
+ return isolate->heap()->ToBoolean(object->HasProperty(*name));
+}
+
+
MaybeObject* Runtime::GetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key) {
@@ -4109,14 +4308,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
if (callback->IsAccessorInfo()) {
return isolate->heap()->undefined_value();
}
- // TODO(mstarzinger): The __proto__ property should actually be a real
- // JavaScript accessor instead of a foreign callback. But for now we just
- // avoid changing the writability and configurability attribute of this
- // property.
- Handle<Name> proto_string = isolate->factory()->proto_string();
- if (callback->IsForeign() && proto_string->Equals(*name)) {
- attr = static_cast<PropertyAttributes>(attr & ~(READ_ONLY | DONT_DELETE));
- }
// Avoid redefining foreign callback as data property, just use the stored
// setter to update the value instead.
// TODO(mstarzinger): So far this only works if property attributes don't
@@ -4323,9 +4514,10 @@ MaybeObject* Runtime::ForceSetObjectProperty(Isolate* isolate,
}
-MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
- Handle<JSReceiver> receiver,
- Handle<Object> key) {
+MaybeObject* Runtime::DeleteObjectProperty(Isolate* isolate,
+ Handle<JSReceiver> receiver,
+ Handle<Object> key,
+ JSReceiver::DeleteMode mode) {
HandleScope scope(isolate);
// Check if the given key is an array index.
@@ -4341,7 +4533,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
return isolate->heap()->true_value();
}
- return receiver->DeleteElement(index, JSReceiver::FORCE_DELETION);
+ return receiver->DeleteElement(index, mode);
}
Handle<Name> name;
@@ -4356,7 +4548,7 @@ MaybeObject* Runtime::ForceDeleteObjectProperty(Isolate* isolate,
}
if (name->IsString()) Handle<String>::cast(name)->TryFlatten();
- return receiver->DeleteProperty(*name, JSReceiver::FORCE_DELETION);
+ return receiver->DeleteProperty(*name, mode);
}
@@ -4607,7 +4799,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HasLocalProperty) {
// Fast case: either the key is a real named property or it is not
// an array index and there are no interceptors or hidden
// prototypes.
- if (object->HasRealNamedProperty(key)) return isolate->heap()->true_value();
+ if (object->HasRealNamedProperty(isolate, key))
+ return isolate->heap()->true_value();
Map* map = object->map();
if (!key_is_array_index &&
!map->has_named_interceptor() &&
@@ -7568,9 +7761,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_OptimizeFunctionOnNextCall) {
unoptimized->kind() == Code::FUNCTION) {
CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
- isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
- unoptimized->set_allow_osr_at_loop_nesting_level(
- Code::kMaxLoopNestingMarker);
+ for (int i = 0; i <= Code::kMaxLoopNestingMarker; i++) {
+ unoptimized->set_allow_osr_at_loop_nesting_level(i);
+ isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
+ }
} else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("parallel"))) {
function->MarkForParallelRecompilation();
}
@@ -7663,25 +7857,28 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
ASSERT(frame->LookupCode() == *unoptimized);
ASSERT(unoptimized->contains(frame->pc()));
- // Use linear search of the unoptimized code's stack check table to find
+ // Use linear search of the unoptimized code's back edge table to find
// the AST id matching the PC.
Address start = unoptimized->instruction_start();
unsigned target_pc_offset = static_cast<unsigned>(frame->pc() - start);
- Address table_cursor = start + unoptimized->stack_check_table_offset();
+ Address table_cursor = start + unoptimized->back_edge_table_offset();
uint32_t table_length = Memory::uint32_at(table_cursor);
table_cursor += kIntSize;
+ uint8_t loop_depth = 0;
for (unsigned i = 0; i < table_length; ++i) {
// Table entries are (AST id, pc offset) pairs.
uint32_t pc_offset = Memory::uint32_at(table_cursor + kIntSize);
if (pc_offset == target_pc_offset) {
ast_id = BailoutId(static_cast<int>(Memory::uint32_at(table_cursor)));
+ loop_depth = Memory::uint8_at(table_cursor + 2 * kIntSize);
break;
}
- table_cursor += 2 * kIntSize;
+ table_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
ASSERT(!ast_id.IsNone());
if (FLAG_trace_osr) {
- PrintF("[replacing on-stack at AST id %d in ", ast_id.ToInt());
+ PrintF("[replacing on-stack at AST id %d, loop depth %d in ",
+ ast_id.ToInt(), loop_depth);
function->PrintName();
PrintF("]\n");
}
@@ -7709,18 +7906,18 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
}
}
- // Revert to the original stack checks in the original unoptimized code.
+ // Revert to the original interrupt calls in the original unoptimized code.
if (FLAG_trace_osr) {
- PrintF("[restoring original stack checks in ");
+ PrintF("[restoring original interrupt calls in ");
function->PrintName();
PrintF("]\n");
}
InterruptStub interrupt_stub;
- Handle<Code> check_code = interrupt_stub.GetCode(isolate);
+ Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
- Deoptimizer::RevertStackCheckCode(*unoptimized,
- *check_code,
- *replacement_code);
+ Deoptimizer::RevertInterruptCode(*unoptimized,
+ *interrupt_code,
+ *replacement_code);
// Allow OSR only at nesting level zero again.
unoptimized->set_allow_osr_at_loop_nesting_level(0);
@@ -8821,14 +9018,18 @@ class ArrayConcatVisitor {
storage_(Handle<FixedArray>::cast(
isolate->global_handles()->Create(*storage))),
index_offset_(0u),
- fast_elements_(fast_elements) { }
+ fast_elements_(fast_elements),
+ exceeds_array_limit_(false) { }
~ArrayConcatVisitor() {
clear_storage();
}
void visit(uint32_t i, Handle<Object> elm) {
- if (i >= JSObject::kMaxElementCount - index_offset_) return;
+ if (i > JSObject::kMaxElementCount - index_offset_) {
+ exceeds_array_limit_ = true;
+ return;
+ }
uint32_t index = index_offset_ + i;
if (fast_elements_) {
@@ -8863,6 +9064,10 @@ class ArrayConcatVisitor {
}
}
+ bool exceeds_array_limit() {
+ return exceeds_array_limit_;
+ }
+
Handle<JSArray> ToArray() {
Handle<JSArray> array = isolate_->factory()->NewJSArray(0);
Handle<Object> length =
@@ -8921,7 +9126,8 @@ class ArrayConcatVisitor {
// Index after last seen index. Always less than or equal to
// JSObject::kMaxElementCount.
uint32_t index_offset_;
- bool fast_elements_;
+ bool fast_elements_ : 1;
+ bool exceeds_array_limit_ : 1;
};
@@ -9477,6 +9683,11 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
}
}
+ if (visitor.exceeds_array_limit()) {
+ return isolate->Throw(
+ *isolate->factory()->NewRangeError("invalid_array_length",
+ HandleVector<Object>(NULL, 0)));
+ }
return *visitor.ToArray();
}
@@ -10733,14 +10944,14 @@ class ScopeIterator {
info.MarkAsEval();
info.SetContext(Handle<Context>(function_->context()));
}
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ if (Parser::Parse(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
} else {
// Function code
CompilationInfoWithZone info(shared_info);
- if (ParserApi::Parse(&info, kNoParsingFlags) && Scope::Analyze(&info)) {
+ if (Parser::Parse(&info) && Scope::Analyze(&info)) {
scope = info.function()->scope();
}
RetrieveScopeChain(scope, shared_info);
@@ -12015,11 +12226,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugConstructedBy) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugGetPrototype) {
NoHandleAllocation ha(isolate);
ASSERT(args.length() == 1);
-
CONVERT_ARG_CHECKED(JSObject, obj, 0);
-
- // Use the __proto__ accessor.
- return Accessors::ObjectPrototype.getter(obj, NULL);
+ return GetPrototypeSkipHiddenPrototypes(isolate, obj);
}
@@ -12786,7 +12994,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Log) {
String::FlatContent format_content = format->GetFlatContent();
RUNTIME_ASSERT(format_content.IsAscii());
Vector<const uint8_t> chars = format_content.ToOneByteVector();
- LOGGER->LogRuntime(isolate, Vector<const char>::cast(chars), elms);
+ isolate->logger()->LogRuntime(Vector<const char>::cast(chars), elms);
return isolate->heap()->undefined_value();
}
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index b16acd1d8..cbf70e95c 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -64,6 +64,7 @@ namespace internal {
F(ToFastProperties, 1, 1) \
F(FinishArrayPrototypeSetup, 1, 1) \
F(SpecialArrayFunctions, 1, 1) \
+ F(IsClassicModeFunction, 1, 1) \
F(GetDefaultReceiver, 1, 1) \
\
F(GetPrototype, 1, 1) \
@@ -218,6 +219,7 @@ namespace internal {
F(NumberToExponential, 2, 1) \
F(NumberToPrecision, 2, 1)
+
#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
/* Reflection */ \
F(FunctionSetInstanceClassName, 2, 1) \
@@ -294,6 +296,9 @@ namespace internal {
F(CreateArrayLiteral, 3, 1) \
F(CreateArrayLiteralShallow, 3, 1) \
\
+ /* Harmony generators */ \
+ F(CreateJSGeneratorObject, 0, 1) \
+ \
/* Harmony modules */ \
F(IsJSModule, 1, 1) \
\
@@ -346,6 +351,12 @@ namespace internal {
F(ArrayBufferGetByteLength, 1, 1)\
F(ArrayBufferSliceImpl, 3, 1) \
\
+ F(TypedArrayInitialize, 5, 1) \
+ F(TypedArrayGetBuffer, 1, 1) \
+ F(TypedArrayGetByteLength, 1, 1) \
+ F(TypedArrayGetByteOffset, 1, 1) \
+ F(TypedArrayGetLength, 1, 1) \
+ \
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewObject, 1, 1) \
@@ -698,7 +709,13 @@ class Runtime : public AllStatic {
Handle<Object> value,
PropertyAttributes attr);
- MUST_USE_RESULT static MaybeObject* ForceDeleteObjectProperty(
+ MUST_USE_RESULT static MaybeObject* DeleteObjectProperty(
+ Isolate* isolate,
+ Handle<JSReceiver> object,
+ Handle<Object> key,
+ JSReceiver::DeleteMode mode);
+
+ MUST_USE_RESULT static MaybeObject* HasObjectProperty(
Isolate* isolate,
Handle<JSReceiver> object,
Handle<Object> key);
diff --git a/deps/v8/src/sampler.cc b/deps/v8/src/sampler.cc
new file mode 100644
index 000000000..948b05407
--- /dev/null
+++ b/deps/v8/src/sampler.cc
@@ -0,0 +1,694 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__OpenBSD__) \
+ || defined(__NetBSD__) || defined(__sun) || defined(__ANDROID__)
+
+#define USE_SIGNALS
+
+#include <errno.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/time.h>
+#include <sys/syscall.h>
+#if !defined(__ANDROID__) || defined(__BIONIC_HAVE_UCONTEXT_T)
+#include <ucontext.h>
+#endif
+#include <unistd.h>
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+ defined(__arm__) && !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>
+#endif
+
+#elif defined(__MACH__)
+
+#include <mach/mach.h>
+
+#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+
+#include "win32-headers.h"
+
+#endif
+
+#include "v8.h"
+
+#include "frames-inl.h"
+#include "log.h"
+#include "platform.h"
+#include "simulator.h"
+#include "v8threads.h"
+
+
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+// Not all versions of Android's C library provide ucontext_t.
+// Detect this and provide custom but compatible definitions. Note that these
+// follow the GLibc naming convention to access register values from
+// mcontext_t.
+//
+// See http://code.google.com/p/android/issues/detail?id=34784
+
+#if defined(__arm__)
+
+typedef struct sigcontext mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__mips__)
+// MIPS version of sigcontext, for Android bionic.
+typedef struct {
+ uint32_t regmask;
+ uint32_t status;
+ uint64_t pc;
+ uint64_t gregs[32];
+ uint64_t fpregs[32];
+ uint32_t acx;
+ uint32_t fpc_csr;
+ uint32_t fpc_eir;
+ uint32_t used_math;
+ uint32_t dsp;
+ uint64_t mdhi;
+ uint64_t mdlo;
+ uint32_t hi1;
+ uint32_t lo1;
+ uint32_t hi2;
+ uint32_t lo2;
+ uint32_t hi3;
+ uint32_t lo3;
+} mcontext_t;
+
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+
+#elif defined(__i386__)
+// x86 version for Android.
+typedef struct {
+ uint32_t gregs[19];
+ void* fpregs;
+ uint32_t oldmask;
+ uint32_t cr2;
+} mcontext_t;
+
+typedef uint32_t kernel_sigset_t[2]; // x86 kernel uses 64-bit signal masks
+typedef struct ucontext {
+ uint32_t uc_flags;
+ struct ucontext* uc_link;
+ stack_t uc_stack;
+ mcontext_t uc_mcontext;
+ // Other fields are not used by V8, don't define them here.
+} ucontext_t;
+enum { REG_EBP = 6, REG_ESP = 7, REG_EIP = 14 };
+#endif
+
+#endif // __ANDROID__ && !defined(__BIONIC_HAVE_UCONTEXT_T)
+
+
+namespace v8 {
+namespace internal {
+
+#if defined(USE_SIGNALS)
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData()
+ : vm_tid_(pthread_self()),
+ profiled_thread_id_(ThreadId::Current()) {}
+
+ pthread_t vm_tid() const { return vm_tid_; }
+ ThreadId profiled_thread_id() { return profiled_thread_id_; }
+
+ private:
+ pthread_t vm_tid_;
+ ThreadId profiled_thread_id_;
+};
+
+
+class SignalHandler : public AllStatic {
+ public:
+ static inline void EnsureInstalled() {
+ if (signal_handler_installed_) return;
+ struct sigaction sa;
+ sa.sa_sigaction = &HandleProfilerSignal;
+ sigemptyset(&sa.sa_mask);
+ sa.sa_flags = SA_RESTART | SA_SIGINFO;
+ signal_handler_installed_ =
+ (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+ }
+
+ static inline void Restore() {
+ if (signal_handler_installed_) {
+ sigaction(SIGPROF, &old_signal_handler_, 0);
+ signal_handler_installed_ = false;
+ }
+ }
+
+ static inline bool Installed() {
+ return signal_handler_installed_;
+ }
+
+ private:
+ static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+ static bool signal_handler_installed_;
+ static struct sigaction old_signal_handler_;
+};
+
+struct sigaction SignalHandler::old_signal_handler_;
+bool SignalHandler::signal_handler_installed_ = false;
+
+
+void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
+ void* context) {
+#if defined(__native_client__)
+ // As Native Client does not support signal handling, profiling
+ // is disabled.
+ return;
+#else
+ USE(info);
+ if (signal != SIGPROF) return;
+ Isolate* isolate = Isolate::UncheckedCurrent();
+ if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
+ // We require a fully initialized and entered isolate.
+ return;
+ }
+ if (v8::Locker::IsActive() &&
+ !isolate->thread_manager()->IsLockedByCurrentThread()) {
+ return;
+ }
+
+ Sampler* sampler = isolate->logger()->sampler();
+ if (sampler == NULL || !sampler->IsActive()) return;
+
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+ ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
+ Isolate::PerIsolateThreadData* per_thread_data = isolate->
+ FindPerThreadDataForThread(thread_id);
+ if (!per_thread_data) return;
+ Simulator* sim = per_thread_data->simulator();
+  // Check if there is an active simulator before allocating TickSample.
+ if (!sim) return;
+#endif
+#endif // USE_SIMULATOR
+
+ TickSample sample_obj;
+ TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
+#elif V8_TARGET_ARCH_MIPS
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
+#endif // V8_TARGET_ARCH_*
+#else
+ // Extracting the sample from the context is extremely machine dependent.
+ ucontext_t* ucontext = reinterpret_cast<ucontext_t*>(context);
+ mcontext_t& mcontext = ucontext->uc_mcontext;
+ sample->state = isolate->current_vm_state();
+#if defined(__linux__) || defined(__ANDROID__)
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_RBP]);
+#elif V8_HOST_ARCH_ARM
+#if defined(__GLIBC__) && !defined(__UCLIBC__) && \
+ (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+ // Old GLibc ARM versions used a gregs[] array to access the register
+ // values from mcontext_t.
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[R15]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[R13]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[R11]);
+#else
+ sample->pc = reinterpret_cast<Address>(mcontext.arm_pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.arm_sp);
+ sample->fp = reinterpret_cast<Address>(mcontext.arm_fp);
+#endif // defined(__GLIBC__) && !defined(__UCLIBC__) &&
+ // (__GLIBC__ < 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ <= 3))
+#elif V8_HOST_ARCH_MIPS
+ sample->pc = reinterpret_cast<Address>(mcontext.pc);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#endif // V8_HOST_ARCH_*
+#elif defined(__FreeBSD__)
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_eip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_esp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_rip);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_rsp);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_rbp);
+#elif V8_HOST_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(mcontext.mc_r15);
+ sample->sp = reinterpret_cast<Address>(mcontext.mc_r13);
+ sample->fp = reinterpret_cast<Address>(mcontext.mc_r11);
+#endif // V8_HOST_ARCH_*
+#elif defined(__NetBSD__)
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_EIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_ESP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_EBP]);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(mcontext.__gregs[_REG_RIP]);
+ sample->sp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RSP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.__gregs[_REG_RBP]);
+#endif // V8_HOST_ARCH_*
+#elif defined(__OpenBSD__)
+ USE(mcontext);
+#if V8_HOST_ARCH_IA32
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_eip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_esp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_ebp);
+#elif V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(ucontext->sc_rip);
+ sample->sp = reinterpret_cast<Address>(ucontext->sc_rsp);
+ sample->fp = reinterpret_cast<Address>(ucontext->sc_rbp);
+#endif // V8_HOST_ARCH_*
+#elif defined(__sun)
+ sample->pc = reinterpret_cast<Address>(mcontext.gregs[REG_PC]);
+ sample->sp = reinterpret_cast<Address>(mcontext.gregs[REG_SP]);
+ sample->fp = reinterpret_cast<Address>(mcontext.gregs[REG_FP]);
+#endif // __sun
+#endif // USE_SIMULATOR
+
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
+#endif // __native_client__
+}
+
+#elif defined(__MACH__)
+class Sampler::PlatformData : public Malloced {
+ public:
+ PlatformData()
+ : profiled_thread_(mach_thread_self()),
+ profiled_thread_id_(ThreadId::Current()) {}
+
+ ~PlatformData() {
+ // Deallocate Mach port for thread.
+ mach_port_deallocate(mach_task_self(), profiled_thread_);
+ }
+
+ thread_act_t profiled_thread() { return profiled_thread_; }
+ ThreadId profiled_thread_id() { return profiled_thread_id_; }
+
+ private:
+  // Note: for profiled_thread_ Mach primitives are used instead of PThread's
+  // because the latter doesn't provide the thread manipulation primitives
+  // required. For details, consult the "Mac OS X Internals" book, Section 7.3.
+ thread_act_t profiled_thread_;
+ ThreadId profiled_thread_id_;
+};
+
+#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+
+// ----------------------------------------------------------------------------
+// Win32 profiler support. On Cygwin we use the same sampler implementation as
+// on Win32.
+
+class Sampler::PlatformData : public Malloced {
+ public:
+ // Get a handle to the calling thread. This is the thread that we are
+ // going to profile. We need to make a copy of the handle because we are
+ // going to use it in the sampler thread. Using GetThreadHandle() will
+ // not work in this case. We're using OpenThread because DuplicateHandle
+ // for some reason doesn't work in Chrome's sandbox.
+ PlatformData()
+ : profiled_thread_(OpenThread(THREAD_GET_CONTEXT |
+ THREAD_SUSPEND_RESUME |
+ THREAD_QUERY_INFORMATION,
+ false,
+ GetCurrentThreadId())),
+ profiled_thread_id_(ThreadId::Current()) {}
+
+ ~PlatformData() {
+ if (profiled_thread_ != NULL) {
+ CloseHandle(profiled_thread_);
+ profiled_thread_ = NULL;
+ }
+ }
+
+ HANDLE profiled_thread() { return profiled_thread_; }
+ ThreadId profiled_thread_id() { return profiled_thread_id_; }
+
+ private:
+ HANDLE profiled_thread_;
+ ThreadId profiled_thread_id_;
+};
+
+#endif
+
+
+class SamplerThread : public Thread {
+ public:
+ static const int kSamplerThreadStackSize = 64 * KB;
+
+ explicit SamplerThread(int interval)
+ : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+ interval_(interval) {}
+
+ static void SetUp() { if (!mutex_) mutex_ = OS::CreateMutex(); }
+ static void TearDown() { delete mutex_; }
+
+ static void AddActiveSampler(Sampler* sampler) {
+ bool need_to_start = false;
+ ScopedLock lock(mutex_);
+ if (instance_ == NULL) {
+      // Start a thread that sends the SIGPROF signal to VM threads
+      // when CPU profiling is enabled.
+ instance_ = new SamplerThread(sampler->interval());
+ need_to_start = true;
+ }
+
+ ASSERT(sampler->IsActive());
+ ASSERT(!instance_->active_samplers_.Contains(sampler));
+ ASSERT(instance_->interval_ == sampler->interval());
+ instance_->active_samplers_.Add(sampler);
+
+#if defined(USE_SIGNALS)
+ SignalHandler::EnsureInstalled();
+#endif
+ if (need_to_start) instance_->StartSynchronously();
+ }
+
+ static void RemoveActiveSampler(Sampler* sampler) {
+ SamplerThread* instance_to_remove = NULL;
+ {
+ ScopedLock lock(mutex_);
+
+ ASSERT(sampler->IsActive());
+ bool removed = instance_->active_samplers_.RemoveElement(sampler);
+ ASSERT(removed);
+ USE(removed);
+
+ // We cannot delete the instance immediately as we need to Join() the
+ // thread but we are holding mutex_ and the thread may try to acquire it.
+ if (instance_->active_samplers_.is_empty()) {
+ instance_to_remove = instance_;
+ instance_ = NULL;
+#if defined(USE_SIGNALS)
+ SignalHandler::Restore();
+#endif
+ }
+ }
+
+ if (!instance_to_remove) return;
+ instance_to_remove->Join();
+ delete instance_to_remove;
+ }
+
+ // Implement Thread::Run().
+ virtual void Run() {
+ while (true) {
+ {
+ ScopedLock lock(mutex_);
+ if (active_samplers_.is_empty()) break;
+ // When CPU profiling is enabled both JavaScript and C++ code is
+ // profiled. We must not suspend.
+ for (int i = 0; i < active_samplers_.length(); ++i) {
+ Sampler* sampler = active_samplers_.at(i);
+ if (!sampler->isolate()->IsInitialized()) continue;
+ if (!sampler->IsProfiling()) continue;
+ SampleContext(sampler);
+ }
+ }
+ OS::Sleep(interval_);
+ }
+ }
+
+ private:
+#if defined(USE_SIGNALS)
+
+ void SampleContext(Sampler* sampler) {
+ if (!SignalHandler::Installed()) return;
+ pthread_t tid = sampler->platform_data()->vm_tid();
+ int result = pthread_kill(tid, SIGPROF);
+ USE(result);
+ ASSERT(result == 0);
+ }
+
+#elif defined(__MACH__)
+
+ void SampleContext(Sampler* sampler) {
+ thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
+ Isolate* isolate = sampler->isolate();
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+ ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
+ Isolate::PerIsolateThreadData* per_thread_data = isolate->
+ FindPerThreadDataForThread(thread_id);
+ if (!per_thread_data) return;
+ Simulator* sim = per_thread_data->simulator();
+    // Check if there is an active simulator before allocating TickSample.
+ if (!sim) return;
+#endif
+#endif // USE_SIMULATOR
+ TickSample sample_obj;
+ TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ if (KERN_SUCCESS != thread_suspend(profiled_thread)) return;
+
+#if V8_HOST_ARCH_X64
+ thread_state_flavor_t flavor = x86_THREAD_STATE64;
+ x86_thread_state64_t state;
+ mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __r ## name
+#else
+#define REGISTER_FIELD(name) r ## name
+#endif // __DARWIN_UNIX03
+#elif V8_HOST_ARCH_IA32
+ thread_state_flavor_t flavor = i386_THREAD_STATE;
+ i386_thread_state_t state;
+ mach_msg_type_number_t count = i386_THREAD_STATE_COUNT;
+#if __DARWIN_UNIX03
+#define REGISTER_FIELD(name) __e ## name
+#else
+#define REGISTER_FIELD(name) e ## name
+#endif // __DARWIN_UNIX03
+#else
+#error Unsupported Mac OS X host architecture.
+#endif // V8_HOST_ARCH
+
+ if (thread_get_state(profiled_thread,
+ flavor,
+ reinterpret_cast<natural_t*>(&state),
+ &count) == KERN_SUCCESS) {
+ sample->state = isolate->current_vm_state();
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
+#elif V8_TARGET_ARCH_MIPS
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
+#endif
+#else
+ sample->pc = reinterpret_cast<Address>(state.REGISTER_FIELD(ip));
+ sample->sp = reinterpret_cast<Address>(state.REGISTER_FIELD(sp));
+ sample->fp = reinterpret_cast<Address>(state.REGISTER_FIELD(bp));
+#endif // USE_SIMULATOR
+#undef REGISTER_FIELD
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
+ }
+ thread_resume(profiled_thread);
+ }
+
+#elif defined(_WIN32) || defined(_WIN64) || defined(__CYGWIN__)
+
+ void SampleContext(Sampler* sampler) {
+ HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
+ if (profiled_thread == NULL) return;
+
+ // Context used for sampling the register state of the profiled thread.
+ CONTEXT context;
+ memset(&context, 0, sizeof(context));
+
+ Isolate* isolate = sampler->isolate();
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
+ ThreadId thread_id = sampler->platform_data()->profiled_thread_id();
+ Isolate::PerIsolateThreadData* per_thread_data = isolate->
+ FindPerThreadDataForThread(thread_id);
+ if (!per_thread_data) return;
+ Simulator* sim = per_thread_data->simulator();
+    // Check if there is an active simulator before allocating TickSample.
+ if (!sim) return;
+#endif
+#endif // USE_SIMULATOR
+ TickSample sample_obj;
+ TickSample* sample = isolate->cpu_profiler()->TickSampleEvent();
+ if (sample == NULL) sample = &sample_obj;
+
+ static const DWORD kSuspendFailed = static_cast<DWORD>(-1);
+ if (SuspendThread(profiled_thread) == kSuspendFailed) return;
+ sample->state = isolate->current_vm_state();
+
+ context.ContextFlags = CONTEXT_FULL;
+ if (GetThreadContext(profiled_thread, &context) != 0) {
+#if defined(USE_SIMULATOR)
+#if V8_TARGET_ARCH_ARM
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::r11));
+#elif V8_TARGET_ARCH_MIPS
+ sample->pc = reinterpret_cast<Address>(sim->get_register(Simulator::pc));
+ sample->sp = reinterpret_cast<Address>(sim->get_register(Simulator::sp));
+ sample->fp = reinterpret_cast<Address>(sim->get_register(Simulator::fp));
+#endif
+#else
+#if V8_HOST_ARCH_X64
+ sample->pc = reinterpret_cast<Address>(context.Rip);
+ sample->sp = reinterpret_cast<Address>(context.Rsp);
+ sample->fp = reinterpret_cast<Address>(context.Rbp);
+#else
+ sample->pc = reinterpret_cast<Address>(context.Eip);
+ sample->sp = reinterpret_cast<Address>(context.Esp);
+ sample->fp = reinterpret_cast<Address>(context.Ebp);
+#endif
+#endif // USE_SIMULATOR
+ sampler->SampleStack(sample);
+ sampler->Tick(sample);
+ }
+ ResumeThread(profiled_thread);
+ }
+
+#endif // USE_SIGNALS
+
+
+ // Protects the process wide state below.
+ static Mutex* mutex_;
+ static SamplerThread* instance_;
+
+ const int interval_;
+ List<Sampler*> active_samplers_;
+
+ DISALLOW_COPY_AND_ASSIGN(SamplerThread);
+};
+
+
+Mutex* SamplerThread::mutex_ = NULL;
+SamplerThread* SamplerThread::instance_ = NULL;
+
+
+//
+// StackTracer implementation
+//
+DISABLE_ASAN void TickSample::Trace(Isolate* isolate) {
+ ASSERT(isolate->IsInitialized());
+
+ // Avoid collecting traces while doing GC.
+ if (state == GC) return;
+
+ const Address js_entry_sp =
+ Isolate::js_entry_sp(isolate->thread_local_top());
+ if (js_entry_sp == 0) {
+ // Not executing JS now.
+ return;
+ }
+
+ external_callback = isolate->external_callback();
+
+ SafeStackTraceFrameIterator it(isolate, fp, sp, sp, js_entry_sp);
+ int i = 0;
+ while (!it.done() && i < TickSample::kMaxFramesCount) {
+ stack[i++] = it.frame()->pc();
+ it.Advance();
+ }
+ frames_count = i;
+}
+
+
+void Sampler::SetUp() {
+ SamplerThread::SetUp();
+}
+
+
+void Sampler::TearDown() {
+ SamplerThread::TearDown();
+}
+
+
+Sampler::Sampler(Isolate* isolate, int interval)
+ : isolate_(isolate),
+ interval_(interval),
+ profiling_(false),
+ active_(false),
+ samples_taken_(0) {
+ data_ = new PlatformData;
+}
+
+
+Sampler::~Sampler() {
+ ASSERT(!IsActive());
+ delete data_;
+}
+
+void Sampler::Start() {
+ ASSERT(!IsActive());
+ SetActive(true);
+ SamplerThread::AddActiveSampler(this);
+}
+
+
+void Sampler::Stop() {
+ ASSERT(IsActive());
+ SamplerThread::RemoveActiveSampler(this);
+ SetActive(false);
+}
+
+void Sampler::SampleStack(TickSample* sample) {
+ sample->Trace(isolate_);
+ if (++samples_taken_ < 0) samples_taken_ = 0;
+}
+
+} } // namespace v8::internal
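
Editor's note: the signal-based branch in sampler.cc above works by installing a SIGPROF handler once (SignalHandler::EnsureInstalled) and having the shared SamplerThread deliver SIGPROF to each profiled thread via pthread_kill; the handler then reads pc/sp/fp out of the ucontext_t it receives. The following is a minimal standalone sketch of that pattern for POSIX systems, illustrative only and not part of the patch; names such as SamplerArgs and SamplerThreadMain are invented for the example. Build with: g++ -pthread sketch.cc

    #include <pthread.h>
    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    static volatile sig_atomic_t ticks = 0;

    // Async-signal-safe handler: record that a tick happened. The real
    // handler additionally casts 'context' to ucontext_t* and extracts
    // pc/sp/fp for the TickSample.
    static void HandleProfilerSignal(int sig, siginfo_t* info, void* context) {
      if (sig != SIGPROF) return;
      (void) info;
      (void) context;
      ++ticks;
    }

    static void InstallHandler() {
      struct sigaction sa;
      memset(&sa, 0, sizeof(sa));
      sa.sa_sigaction = &HandleProfilerSignal;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;
      sigaction(SIGPROF, &sa, NULL);
    }

    struct SamplerArgs {
      pthread_t profiled_thread;
      int interval_ms;
      int samples;
    };

    // Sampler thread: periodically delivers SIGPROF to the profiled thread,
    // mirroring SamplerThread::SampleContext in the signal-based branch.
    static void* SamplerThreadMain(void* raw) {
      SamplerArgs* args = static_cast<SamplerArgs*>(raw);
      for (int i = 0; i < args->samples; i++) {
        pthread_kill(args->profiled_thread, SIGPROF);
        usleep(args->interval_ms * 1000);
      }
      return NULL;
    }

    int main() {
      InstallHandler();
      SamplerArgs args = { pthread_self(), 1 /* ms */, 100 };
      pthread_t sampler;
      pthread_create(&sampler, NULL, &SamplerThreadMain, &args);
      // Busy work on the profiled (main) thread while samples arrive.
      volatile double sink = 0;
      for (long i = 0; i < 50000000L; i++) sink += i * 0.5;
      pthread_join(sampler, NULL);
      printf("ticks observed: %d\n", (int) ticks);
      return 0;
    }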
diff --git a/deps/v8/src/sampler.h b/deps/v8/src/sampler.h
new file mode 100644
index 000000000..a76d8b9a5
--- /dev/null
+++ b/deps/v8/src/sampler.h
@@ -0,0 +1,120 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_SAMPLER_H_
+#define V8_SAMPLER_H_
+
+#include "atomicops.h"
+#include "v8globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// ----------------------------------------------------------------------------
+// Sampler
+//
+// A sampler periodically samples the state of the VM and optionally
+// (if used for profiling) the program counter and stack pointer for
+// the thread that created it.
+
+// TickSample captures the information collected for each sample.
+struct TickSample {
+ TickSample()
+ : state(OTHER),
+ pc(NULL),
+ sp(NULL),
+ fp(NULL),
+ external_callback(NULL),
+ frames_count(0) {}
+ void Trace(Isolate* isolate);
+ StateTag state; // The state of the VM.
+ Address pc; // Instruction pointer.
+ Address sp; // Stack pointer.
+ Address fp; // Frame pointer.
+ Address external_callback;
+ static const int kMaxFramesCount = 64;
+ Address stack[kMaxFramesCount]; // Call stack.
+ int frames_count : 8; // Number of captured frames.
+};
+
+class Sampler {
+ public:
+ // Initializes the Sampler support. Called once at VM startup.
+ static void SetUp();
+ static void TearDown();
+
+ // Initialize sampler.
+ Sampler(Isolate* isolate, int interval);
+ virtual ~Sampler();
+
+ Isolate* isolate() const { return isolate_; }
+ int interval() const { return interval_; }
+
+ // Performs stack sampling.
+ void SampleStack(TickSample* sample);
+
+ // This method is called for each sampling period with the current
+ // program counter.
+ virtual void Tick(TickSample* sample) = 0;
+
+ // Start and stop sampler.
+ void Start();
+ void Stop();
+
+ // Is the sampler used for profiling?
+ bool IsProfiling() const { return NoBarrier_Load(&profiling_) > 0; }
+ void IncreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, 1); }
+ void DecreaseProfilingDepth() { NoBarrier_AtomicIncrement(&profiling_, -1); }
+
+ // Whether the sampler is running (that is, consumes resources).
+ bool IsActive() const { return NoBarrier_Load(&active_); }
+
+ // Used in tests to make sure that stack sampling is performed.
+ int samples_taken() const { return samples_taken_; }
+ void ResetSamplesTaken() { samples_taken_ = 0; }
+
+ class PlatformData;
+ PlatformData* platform_data() const { return data_; }
+
+ private:
+ void SetActive(bool value) { NoBarrier_Store(&active_, value); }
+
+ Isolate* isolate_;
+ const int interval_;
+ Atomic32 profiling_;
+ Atomic32 active_;
+ PlatformData* data_; // Platform specific data.
+ int samples_taken_; // Counts stack samples taken.
+ DISALLOW_IMPLICIT_CONSTRUCTORS(Sampler);
+};
+
+
+} } // namespace v8::internal
+
+#endif // V8_SAMPLER_H_
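
Editor's note: the contract in sampler.h above is that a subclass supplies Tick(), Start()/Stop() toggle the active flag, and IsProfiling() is a depth counter so nested profiling requests compose. The sketch below is a simplified standalone mirror of that contract, not V8 code; std::atomic and std::thread stand in for V8's atomicops and Thread class, and MiniSampler/CountingSampler are invented names. Build with: g++ -std=c++11 -pthread sketch.cc

    #include <atomic>
    #include <chrono>
    #include <cstdio>
    #include <thread>

    class MiniSampler {
     public:
      explicit MiniSampler(int interval_ms)
          : interval_ms_(interval_ms), profiling_(0), active_(false) {}
      // Callers must Stop() before destroying a sampler.
      virtual ~MiniSampler() { Stop(); }

      // Called once per sampling period while the sampler is active.
      virtual void Tick() = 0;

      void Start() {
        active_.store(true);
        thread_ = std::thread([this] {
          while (active_.load()) {
            if (IsProfiling()) Tick();
            std::this_thread::sleep_for(
                std::chrono::milliseconds(interval_ms_));
          }
        });
      }
      void Stop() {
        if (!active_.exchange(false)) return;
        if (thread_.joinable()) thread_.join();
      }

      bool IsProfiling() const { return profiling_.load() > 0; }
      void IncreaseProfilingDepth() { ++profiling_; }
      void DecreaseProfilingDepth() { --profiling_; }

     private:
      const int interval_ms_;
      std::atomic<int> profiling_;
      std::atomic<bool> active_;
      std::thread thread_;
    };

    // Example subclass: just counts ticks, the way a profiler would record
    // a TickSample per period.
    class CountingSampler : public MiniSampler {
     public:
      explicit CountingSampler(int interval_ms)
          : MiniSampler(interval_ms), ticks_(0) {}
      void Tick() override { ++ticks_; }
      int ticks() const { return ticks_.load(); }
     private:
      std::atomic<int> ticks_;
    };

    int main() {
      CountingSampler sampler(1);
      sampler.IncreaseProfilingDepth();
      sampler.Start();
      std::this_thread::sleep_for(std::chrono::milliseconds(50));
      sampler.Stop();
      std::printf("ticks: %d\n", sampler.ticks());
      return 0;
    }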
diff --git a/deps/v8/src/scanner.h b/deps/v8/src/scanner.h
index dd1bfb8bf..92418f72b 100644
--- a/deps/v8/src/scanner.h
+++ b/deps/v8/src/scanner.h
@@ -42,26 +42,6 @@ namespace v8 {
namespace internal {
-// General collection of (multi-)bit-flags that can be passed to scanners and
-// parsers to signify their (initial) mode of operation.
-enum ParsingFlags {
- kNoParsingFlags = 0,
- // Embed LanguageMode values in parsing flags, i.e., equivalent to:
- // CLASSIC_MODE = 0,
- // STRICT_MODE,
- // EXTENDED_MODE,
- kLanguageModeMask = 0x03,
- kAllowLazy = 0x04,
- kAllowNativesSyntax = 0x08,
- kAllowModules = 0x10,
- kAllowGenerators = 0x20
-};
-
-STATIC_ASSERT((kLanguageModeMask & CLASSIC_MODE) == CLASSIC_MODE);
-STATIC_ASSERT((kLanguageModeMask & STRICT_MODE) == STRICT_MODE);
-STATIC_ASSERT((kLanguageModeMask & EXTENDED_MODE) == EXTENDED_MODE);
-
-
// Returns the value (0 .. 15) of a hexadecimal character c.
// If c is not a legal hexadecimal character, returns a value < 0.
inline int HexValue(uc32 c) {
@@ -235,7 +215,7 @@ class LiteralBuffer {
void ExpandBuffer() {
Vector<byte> new_store = Vector<byte>::New(NewCapacity(kInitialCapacity));
- memcpy(new_store.start(), backing_store_.start(), position_);
+ OS::MemCopy(new_store.start(), backing_store_.start(), position_);
backing_store_.Dispose();
backing_store_ = new_store;
}
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 4ac9d0e6a..10548f993 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -197,6 +197,8 @@ void Scope::SetDefaults(ScopeType type,
outer_scope_calls_non_strict_eval_ = false;
inner_scope_calls_eval_ = false;
force_eager_compilation_ = false;
+ force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
+ ? outer_scope->has_forced_context_allocation() : false;
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
@@ -603,12 +605,18 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
}
- // Collect temporaries which are always allocated on the stack.
+ // Collect temporaries which are always allocated on the stack, unless the
+  // scope as a whole has forced context allocation.
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
if (var->is_used()) {
- ASSERT(var->IsStackLocal());
- stack_locals->Add(var, zone());
+ if (var->IsContextSlot()) {
+ ASSERT(has_forced_context_allocation());
+ context_locals->Add(var, zone());
+ } else {
+ ASSERT(var->IsStackLocal());
+ stack_locals->Add(var, zone());
+ }
}
}
@@ -1182,8 +1190,11 @@ bool Scope::MustAllocateInContext(Variable* var) {
// an eval() call or a runtime with lookup), it must be allocated in the
// context.
//
- // Exceptions: temporary variables are never allocated in a context;
- // catch-bound variables are always allocated in a context.
+ // Exceptions: If the scope as a whole has forced context allocation, all
+ // variables will have context allocation, even temporaries. Otherwise
+ // temporary variables are always stack-allocated. Catch-bound variables are
+ // always context-allocated.
+ if (has_forced_context_allocation()) return true;
if (var->mode() == TEMPORARY) return false;
if (var->mode() == INTERNAL) return true;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index 3ca2dcf0c..66384a1c0 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -269,6 +269,15 @@ class Scope: public ZoneObject {
end_position_ = statement_pos;
}
+ // In some cases we want to force context allocation for a whole scope.
+ void ForceContextAllocation() {
+ ASSERT(!already_resolved());
+ force_context_allocation_ = true;
+ }
+ bool has_forced_context_allocation() const {
+ return force_context_allocation_;
+ }
+
// ---------------------------------------------------------------------------
// Predicates.
@@ -494,6 +503,7 @@ class Scope: public ZoneObject {
bool outer_scope_calls_non_strict_eval_;
bool inner_scope_calls_eval_;
bool force_eager_compilation_;
+ bool force_context_allocation_;
// True if it doesn't need scope resolution (e.g., if the scope was
// constructed based on a serialized scope info or a catch context).
diff --git a/deps/v8/src/serialize.h b/deps/v8/src/serialize.h
index e0bcf4e18..dc9ffd62b 100644
--- a/deps/v8/src/serialize.h
+++ b/deps/v8/src/serialize.h
@@ -311,7 +311,7 @@ int SnapshotByteSource::GetInt() {
void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
- memcpy(to, data_ + position_, number_of_bytes);
+ OS::MemCopy(to, data_ + position_, number_of_bytes);
position_ += number_of_bytes;
}
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index f8e6a1e87..e413bf1ff 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -467,6 +467,11 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->progress_bar_ = 0;
chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->parallel_sweeping_ = 0;
+ chunk->available_in_small_free_list_ = 0;
+ chunk->available_in_medium_free_list_ = 0;
+ chunk->available_in_large_free_list_ = 0;
+ chunk->available_in_huge_free_list_ = 0;
+ chunk->non_available_small_blocks_ = 0;
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
@@ -697,6 +702,15 @@ MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
}
+void Page::ResetFreeListStatistics() {
+ non_available_small_blocks_ = 0;
+ available_in_small_free_list_ = 0;
+ available_in_medium_free_list_ = 0;
+ available_in_large_free_list_ = 0;
+ available_in_huge_free_list_ = 0;
+}
+
+
Page* MemoryAllocator::AllocatePage(intptr_t size,
PagedSpace* owner,
Executability executable) {
@@ -1057,6 +1071,23 @@ int PagedSpace::CountTotalPages() {
}
+void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
+ sizes->huge_size_ = page->available_in_huge_free_list();
+ sizes->small_size_ = page->available_in_small_free_list();
+ sizes->medium_size_ = page->available_in_medium_free_list();
+ sizes->large_size_ = page->available_in_large_free_list();
+}
+
+
+void PagedSpace::ResetFreeListStatistics() {
+ PageIterator page_iterator(this);
+ while (page_iterator.has_next()) {
+ Page* page = page_iterator.next();
+ page->ResetFreeListStatistics();
+ }
+}
+
+
void PagedSpace::ReleasePage(Page* page, bool unlink) {
ASSERT(page->LiveBytes() == 0);
ASSERT(AreaSize() == page->area_size());
@@ -2056,20 +2087,6 @@ void FreeListCategory::Reset() {
}
-intptr_t FreeListCategory::CountFreeListItemsInList(Page* p) {
- int sum = 0;
- FreeListNode* n = top_;
- while (n != NULL) {
- if (Page::FromAddress(n->address()) == p) {
- FreeSpace* free_space = reinterpret_cast<FreeSpace*>(n);
- sum += free_space->Size();
- }
- n = n->next();
- }
- return sum;
-}
-
-
intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
int sum = 0;
FreeListNode** n = &top_;
@@ -2170,20 +2187,28 @@ int FreeList::Free(Address start, int size_in_bytes) {
FreeListNode* node = FreeListNode::FromAddress(start);
node->set_size(heap_, size_in_bytes);
+ Page* page = Page::FromAddress(start);
// Early return to drop too-small blocks on the floor.
- if (size_in_bytes < kSmallListMin) return size_in_bytes;
+ if (size_in_bytes < kSmallListMin) {
+ page->add_non_available_small_blocks(size_in_bytes);
+ return size_in_bytes;
+ }
// Insert other blocks at the head of a free list of the appropriate
// magnitude.
if (size_in_bytes <= kSmallListMax) {
small_list_.Free(node, size_in_bytes);
+ page->add_available_in_small_free_list(size_in_bytes);
} else if (size_in_bytes <= kMediumListMax) {
medium_list_.Free(node, size_in_bytes);
+ page->add_available_in_medium_free_list(size_in_bytes);
} else if (size_in_bytes <= kLargeListMax) {
large_list_.Free(node, size_in_bytes);
+ page->add_available_in_large_free_list(size_in_bytes);
} else {
huge_list_.Free(node, size_in_bytes);
+ page->add_available_in_huge_free_list(size_in_bytes);
}
ASSERT(IsVeryLong() || available() == SumFreeLists());
@@ -2193,20 +2218,33 @@ int FreeList::Free(Address start, int size_in_bytes) {
FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* node = NULL;
+ Page* page = NULL;
if (size_in_bytes <= kSmallAllocationMax) {
node = small_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
+ if (node != NULL) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_small_free_list(-(*node_size));
+ return node;
+ }
}
if (size_in_bytes <= kMediumAllocationMax) {
node = medium_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
+ if (node != NULL) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_medium_free_list(-(*node_size));
+ return node;
+ }
}
if (size_in_bytes <= kLargeAllocationMax) {
node = large_list_.PickNodeFromList(node_size);
- if (node != NULL) return node;
+ if (node != NULL) {
+ page = Page::FromAddress(node->address());
+ page->add_available_in_large_free_list(-(*node_size));
+ return node;
+ }
}
int huge_list_available = huge_list_.available();
@@ -2216,7 +2254,10 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
FreeListNode* cur_node = *cur;
while (cur_node != NULL &&
Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
- huge_list_available -= reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+ huge_list_available -= size;
+ page = Page::FromAddress(cur_node->address());
+ page->add_available_in_huge_free_list(-size);
cur_node = cur_node->next();
}
@@ -2235,6 +2276,8 @@ FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
*cur = node->next();
*node_size = size;
huge_list_available -= size;
+ page = Page::FromAddress(node->address());
+ page->add_available_in_huge_free_list(-size);
break;
}
}
@@ -2322,27 +2365,17 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
}
-void FreeList::CountFreeListItems(Page* p, SizeStats* sizes) {
- sizes->huge_size_ = huge_list_.CountFreeListItemsInList(p);
- if (sizes->huge_size_ < p->area_size()) {
- sizes->small_size_ = small_list_.CountFreeListItemsInList(p);
- sizes->medium_size_ = medium_list_.CountFreeListItemsInList(p);
- sizes->large_size_ = large_list_.CountFreeListItemsInList(p);
- } else {
- sizes->small_size_ = 0;
- sizes->medium_size_ = 0;
- sizes->large_size_ = 0;
- }
-}
-
-
intptr_t FreeList::EvictFreeListItems(Page* p) {
intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+ p->set_available_in_huge_free_list(0);
if (sum < p->area_size()) {
sum += small_list_.EvictFreeListItemsInList(p) +
medium_list_.EvictFreeListItemsInList(p) +
large_list_.EvictFreeListItemsInList(p);
+ p->set_available_in_small_free_list(0);
+ p->set_available_in_medium_free_list(0);
+ p->set_available_in_large_free_list(0);
}
return sum;
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 65eefd015..e7e4d529f 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -547,7 +547,8 @@ class MemoryChunk {
kSlotsBufferOffset + kPointerSize + kPointerSize;
static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
- kIntSize + kIntSize + kPointerSize;
+ kIntSize + kIntSize + kPointerSize +
+ 5 * kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -701,6 +702,13 @@ class MemoryChunk {
intptr_t parallel_sweeping_;
+ // PagedSpace free-list statistics.
+ intptr_t available_in_small_free_list_;
+ intptr_t available_in_medium_free_list_;
+ intptr_t available_in_large_free_list_;
+ intptr_t available_in_huge_free_list_;
+ intptr_t non_available_small_blocks_;
+
static MemoryChunk* Initialize(Heap* heap,
Address base,
size_t size,
@@ -797,6 +805,21 @@ class Page : public MemoryChunk {
void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
+ void ResetFreeListStatistics();
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+ type name() { return name##_; } \
+ void set_##name(type name) { name##_ = name; } \
+ void add_##name(type name) { name##_ += name; }
+
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+ FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+
+#undef FRAGMENTATION_STATS_ACCESSORS
+
#ifdef DEBUG
void Print();
#endif // DEBUG
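
Editor's note: the FRAGMENTATION_STATS_ACCESSORS macro in the hunk above generates, for each per-page statistic, a getter, a setter, and an add_ helper; FreeList::Free() and FreeList::FindNodeFor() in spaces.cc use the add_ helpers to keep the counters in sync as blocks enter and leave the free lists. A hand-expanded, standalone illustration for one field (not part of the patch) looks roughly like this:

    #include <stdint.h>
    #include <stdio.h>

    class PageStats {
     public:
      PageStats() : available_in_small_free_list_(0) {}

      // Equivalent to what FRAGMENTATION_STATS_ACCESSORS(intptr_t,
      // available_in_small_free_list) expands to.
      intptr_t available_in_small_free_list() {
        return available_in_small_free_list_;
      }
      void set_available_in_small_free_list(intptr_t v) {
        available_in_small_free_list_ = v;
      }
      void add_available_in_small_free_list(intptr_t v) {
        available_in_small_free_list_ += v;
      }

     private:
      intptr_t available_in_small_free_list_;
    };

    int main() {
      PageStats page;
      page.add_available_in_small_free_list(32);   // freeing adds...
      page.add_available_in_small_free_list(-32);  // ...allocating subtracts.
      printf("%ld\n", (long) page.available_in_small_free_list());
      return 0;
    }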
@@ -1432,8 +1455,6 @@ class FreeListCategory {
FreeListNode* PickNodeFromList(int *node_size);
- intptr_t CountFreeListItemsInList(Page* p);
-
intptr_t EvictFreeListItemsInList(Page* p);
void RepairFreeList(Heap* heap);
@@ -1528,19 +1549,6 @@ class FreeList BASE_EMBEDDED {
// Used after booting the VM.
void RepairLists(Heap* heap);
- struct SizeStats {
- intptr_t Total() {
- return small_size_ + medium_size_ + large_size_ + huge_size_;
- }
-
- intptr_t small_size_;
- intptr_t medium_size_;
- intptr_t large_size_;
- intptr_t huge_size_;
- };
-
- void CountFreeListItems(Page* p, SizeStats* sizes);
-
intptr_t EvictFreeListItems(Page* p);
FreeListCategory* small_list() { return &small_list_; }
@@ -1625,6 +1633,20 @@ class PagedSpace : public Space {
// Approximate amount of physical memory committed for this space.
size_t CommittedPhysicalMemory();
+ struct SizeStats {
+ intptr_t Total() {
+ return small_size_ + medium_size_ + large_size_ + huge_size_;
+ }
+
+ intptr_t small_size_;
+ intptr_t medium_size_;
+ intptr_t large_size_;
+ intptr_t huge_size_;
+ };
+
+ void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
+ void ResetFreeListStatistics();
+
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -1632,6 +1654,7 @@ class PagedSpace : public Space {
// to the available and wasted totals.
void ClearStats() {
accounting_stats_.ClearSizeWaste();
+ ResetFreeListStatistics();
}
// Increases the number of available bytes of that space.
@@ -1785,10 +1808,6 @@ class PagedSpace : public Space {
Page* FirstPage() { return anchor_.next_page(); }
Page* LastPage() { return anchor_.prev_page(); }
- void CountFreeListItems(Page* p, FreeList::SizeStats* sizes) {
- free_list_.CountFreeListItems(p, sizes);
- }
-
void EvictEvacuationCandidatesFromFreeLists();
bool CanExpand();
diff --git a/deps/v8/src/string-stream.cc b/deps/v8/src/string-stream.cc
index bcc30f9ab..61eec0d69 100644
--- a/deps/v8/src/string-stream.cc
+++ b/deps/v8/src/string-stream.cc
@@ -254,7 +254,7 @@ void StringStream::Add(const char* format, FmtElm arg0, FmtElm arg1,
SmartArrayPointer<const char> StringStream::ToCString() const {
char* str = NewArray<char>(length_ + 1);
- memcpy(str, buffer_, length_);
+ OS::MemCopy(str, buffer_, length_);
str[length_] = '\0';
return SmartArrayPointer<const char>(str);
}
@@ -575,7 +575,7 @@ char* HeapStringAllocator::grow(unsigned* bytes) {
if (new_space == NULL) {
return space_;
}
- memcpy(new_space, space_, *bytes);
+ OS::MemCopy(new_space, space_, *bytes);
*bytes = new_bytes;
DeleteArray(space_);
space_ = new_space;
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index 2f8043c60..4cae85b02 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -25,24 +25,22 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $String = global.String;
// var $NaN = 0/0;
+// -------------------------------------------------------------------
-// Set the String function and constructor.
-%SetCode($String, function(x) {
+function StringConstructor(x) {
var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
}
-});
+}
-%FunctionSetPrototype($String, new $String());
// ECMA-262 section 15.5.4.2
function StringToString() {
@@ -994,16 +992,19 @@ SetUpLockedPrototype(ReplaceResultBuilder,
function SetUpString() {
%CheckIsBootstrapping();
+
+ // Set the String function and constructor.
+ %SetCode($String, StringConstructor);
+ %FunctionSetPrototype($String, new $String());
+
// Set up the constructor property on the String prototype object.
%SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
-
// Set up the non-enumerable functions on the String object.
InstallFunctions($String, DONT_ENUM, $Array(
"fromCharCode", StringFromCharCode
));
-
// Set up the non-enumerable functions on the String prototype object.
InstallFunctions($String.prototype, DONT_ENUM, $Array(
"valueOf", StringValueOf,
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index 553c6f509..396e92ce3 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -402,18 +402,30 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
Handle<Code> StubCache::ComputeStoreField(Handle<Name> name,
Handle<JSObject> receiver,
LookupResult* lookup,
- Handle<Map> transition,
StrictModeFlag strict_mode) {
- Code::StubType type =
- transition.is_null() ? Code::FIELD : Code::MAP_TRANSITION;
+ Handle<Code> stub = FindIC(
+ name, receiver, Code::STORE_IC, Code::FIELD, strict_mode);
+ if (!stub.is_null()) return stub;
+
+ StoreStubCompiler compiler(isolate_, strict_mode);
+ Handle<Code> code = compiler.CompileStoreField(receiver, lookup, name);
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+Handle<Code> StubCache::ComputeStoreTransition(Handle<Name> name,
+ Handle<JSObject> receiver,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode) {
Handle<Code> stub = FindIC(
- name, receiver, Code::STORE_IC, type, strict_mode);
+ name, receiver, Code::STORE_IC, Code::MAP_TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
StoreStubCompiler compiler(isolate_, strict_mode);
Handle<Code> code =
- compiler.CompileStoreField(receiver, lookup, transition, name);
+ compiler.CompileStoreTransition(receiver, lookup, transition, name);
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -534,20 +546,35 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<Name> name,
return code;
}
+
Handle<Code> StubCache::ComputeKeyedStoreField(Handle<Name> name,
Handle<JSObject> receiver,
LookupResult* lookup,
- Handle<Map> transition,
StrictModeFlag strict_mode) {
- Code::StubType type =
- (transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Handle<Code> stub = FindIC(
- name, receiver, Code::KEYED_STORE_IC, type, strict_mode);
+ name, receiver, Code::KEYED_STORE_IC, Code::FIELD, strict_mode);
+ if (!stub.is_null()) return stub;
+
+ KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
+ Handle<Code> code = compiler.CompileStoreField(receiver, lookup, name);
+ JSObject::UpdateMapCodeCache(receiver, name, code);
+ return code;
+}
+
+
+Handle<Code> StubCache::ComputeKeyedStoreTransition(
+ Handle<Name> name,
+ Handle<JSObject> receiver,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode) {
+ Handle<Code> stub = FindIC(
+ name, receiver, Code::KEYED_STORE_IC, Code::MAP_TRANSITION, strict_mode);
if (!stub.is_null()) return stub;
KeyedStoreStubCompiler compiler(isolate(), strict_mode, STANDARD_STORE);
Handle<Code> code =
- compiler.CompileStoreField(receiver, lookup, transition, name);
+ compiler.CompileStoreTransition(receiver, lookup, transition, name);
JSObject::UpdateMapCodeCache(receiver, name, code);
return code;
}
@@ -1587,11 +1614,39 @@ Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
}
+Handle<Code> BaseStoreStubCompiler::CompileStoreTransition(
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name) {
+ Label miss, miss_restore_name;
+
+ GenerateNameCheck(name, this->name(), &miss);
+
+ GenerateStoreTransition(masm(),
+ object,
+ lookup,
+ transition,
+ name,
+ receiver(), this->name(), value(),
+ scratch1(), scratch2(),
+ &miss,
+ &miss_restore_name);
+
+ // Handle store cache miss.
+ GenerateRestoreName(masm(), &miss_restore_name, name);
+ __ bind(&miss);
+ TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+ // Return the generated code.
+ return GetICCode(kind(), Code::MAP_TRANSITION, name);
+}
+
+
Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
LookupResult* lookup,
- Handle<Map> transition,
Handle<Name> name) {
- Label miss, miss_restore_name;
+ Label miss;
GenerateNameCheck(name, this->name(), &miss);
@@ -1599,21 +1654,15 @@ Handle<Code> BaseStoreStubCompiler::CompileStoreField(Handle<JSObject> object,
GenerateStoreField(masm(),
object,
lookup,
- transition,
- name,
receiver(), this->name(), value(), scratch1(), scratch2(),
- &miss,
- &miss_restore_name);
+ &miss);
// Handle store cache miss.
- GenerateRestoreName(masm(), &miss_restore_name, name);
__ bind(&miss);
TailCallBuiltin(masm(), MissBuiltin(kind()));
// Return the generated code.
- return GetICCode(kind(),
- transition.is_null() ? Code::FIELD : Code::MAP_TRANSITION,
- name);
+ return GetICCode(kind(), Code::FIELD, name);
}
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index bca3b7bca..02bb541bd 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -162,9 +162,14 @@ class StubCache {
Handle<Code> ComputeStoreField(Handle<Name> name,
Handle<JSObject> object,
LookupResult* lookup,
- Handle<Map> transition,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeStoreTransition(Handle<Name> name,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode);
+
Handle<Code> ComputeStoreNormal(StrictModeFlag strict_mode);
Handle<Code> ComputeStoreGlobal(Handle<Name> name,
@@ -193,8 +198,12 @@ class StubCache {
Handle<Code> ComputeKeyedStoreField(Handle<Name> name,
Handle<JSObject> object,
LookupResult* lookup,
- Handle<Map> transition,
StrictModeFlag strict_mode);
+ Handle<Code> ComputeKeyedStoreTransition(Handle<Name> name,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ StrictModeFlag strict_mode);
Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
@@ -509,18 +518,28 @@ class StubCompiler BASE_EMBEDDED {
Register scratch2,
Label* miss_label);
+ void GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label,
+ Label* miss_restore_name);
+
void GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
- Label* miss_label,
- Label* miss_restore_name);
+ Label* miss_label);
static Builtins::Name MissBuiltin(Code::Kind kind) {
switch (kind) {
@@ -781,9 +800,13 @@ class BaseStoreStubCompiler: public StubCompiler {
virtual ~BaseStoreStubCompiler() { }
+ Handle<Code> CompileStoreTransition(Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name);
+
Handle<Code> CompileStoreField(Handle<JSObject> object,
LookupResult* lookup,
- Handle<Map> transition,
Handle<Name> name);
protected:
diff --git a/deps/v8/src/symbol.js b/deps/v8/src/symbol.js
index fb7476f43..050e7d918 100644
--- a/deps/v8/src/symbol.js
+++ b/deps/v8/src/symbol.js
@@ -27,8 +27,14 @@
"use strict";
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
var $Symbol = global.Symbol;
+// -------------------------------------------------------------------
+
function SymbolConstructor(x) {
var value =
IS_SYMBOL(x) ? x : %CreateSymbol(IS_UNDEFINED(x) ? x : ToString(x));
diff --git a/deps/v8/src/third_party/vtune/ittnotify_config.h b/deps/v8/src/third_party/vtune/ittnotify_config.h
new file mode 100644
index 000000000..412e34462
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/ittnotify_config.h
@@ -0,0 +1,484 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ITTNOTIFY_CONFIG_H_
+#define _ITTNOTIFY_CONFIG_H_
+
+/** @cond exclude_from_documentation */
+#ifndef ITT_OS_WIN
+# define ITT_OS_WIN 1
+#endif /* ITT_OS_WIN */
+
+#ifndef ITT_OS_LINUX
+# define ITT_OS_LINUX 2
+#endif /* ITT_OS_LINUX */
+
+#ifndef ITT_OS_MAC
+# define ITT_OS_MAC 3
+#endif /* ITT_OS_MAC */
+
+#ifndef ITT_OS
+# if defined WIN32 || defined _WIN32
+# define ITT_OS ITT_OS_WIN
+# elif defined( __APPLE__ ) && defined( __MACH__ )
+# define ITT_OS ITT_OS_MAC
+# else
+# define ITT_OS ITT_OS_LINUX
+# endif
+#endif /* ITT_OS */
+
+#ifndef ITT_PLATFORM_WIN
+# define ITT_PLATFORM_WIN 1
+#endif /* ITT_PLATFORM_WIN */
+
+#ifndef ITT_PLATFORM_POSIX
+# define ITT_PLATFORM_POSIX 2
+#endif /* ITT_PLATFORM_POSIX */
+
+#ifndef ITT_PLATFORM
+# if ITT_OS==ITT_OS_WIN
+# define ITT_PLATFORM ITT_PLATFORM_WIN
+# else
+# define ITT_PLATFORM ITT_PLATFORM_POSIX
+# endif /* _WIN32 */
+#endif /* ITT_PLATFORM */
+
+#if defined(_UNICODE) && !defined(UNICODE)
+#define UNICODE
+#endif
+
+#include <stddef.h>
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <tchar.h>
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <stdint.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE || _UNICODE */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#ifndef CDECL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define CDECL __cdecl
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* CDECL */
+
+#ifndef STDCALL
+# if ITT_PLATFORM==ITT_PLATFORM_WIN
+# define STDCALL __stdcall
+# else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define STDCALL /* not supported on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define STDCALL __attribute__ ((stdcall))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#endif /* STDCALL */
+
+#define ITTAPI CDECL
+#define LIBITTAPI CDECL
+
+/* TODO: Temporary for compatibility! */
+#define ITTAPI_CALL CDECL
+#define LIBITTAPI_CALL CDECL
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+/* use __forceinline (VC++ specific) */
+#define ITT_INLINE __forceinline
+#define ITT_INLINE_ATTRIBUTE /* nothing */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/*
+ * Generally, functions are not inlined unless optimization is specified.
+ * For functions declared inline, this attribute inlines the function even
+ * if no optimization level was specified.
+ */
+#ifdef __STRICT_ANSI__
+#define ITT_INLINE static
+#else /* __STRICT_ANSI__ */
+#define ITT_INLINE static inline
+#endif /* __STRICT_ANSI__ */
+#define ITT_INLINE_ATTRIBUTE __attribute__ ((always_inline))
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+/** @endcond */
+
+#ifndef ITT_ARCH_IA32
+# define ITT_ARCH_IA32 1
+#endif /* ITT_ARCH_IA32 */
+
+#ifndef ITT_ARCH_IA32E
+# define ITT_ARCH_IA32E 2
+#endif /* ITT_ARCH_IA32E */
+
+#ifndef ITT_ARCH_IA64
+# define ITT_ARCH_IA64 3
+#endif /* ITT_ARCH_IA64 */
+
+#ifndef ITT_ARCH
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define ITT_ARCH ITT_ARCH_IA32E
+# elif defined _M_IA64 || defined __ia64
+# define ITT_ARCH ITT_ARCH_IA64
+# else
+# define ITT_ARCH ITT_ARCH_IA32
+# endif
+#endif
+
+#ifdef __cplusplus
+# define ITT_EXTERN_C extern "C"
+#else
+# define ITT_EXTERN_C /* nothing */
+#endif /* __cplusplus */
+
+#define ITT_TO_STR_AUX(x) #x
+#define ITT_TO_STR(x) ITT_TO_STR_AUX(x)
+
+#define __ITT_BUILD_ASSERT(expr, suffix) do { static char __itt_build_check_##suffix[(expr) ? 1 : -1]; __itt_build_check_##suffix[0] = 0; } while(0)
+#define _ITT_BUILD_ASSERT(expr, suffix) __ITT_BUILD_ASSERT((expr), suffix)
+#define ITT_BUILD_ASSERT(expr) _ITT_BUILD_ASSERT((expr), __LINE__)
+
+#define ITT_MAGIC { 0xED, 0xAB, 0xAB, 0xEC, 0x0D, 0xEE, 0xDA, 0x30 }
+
+/* Replace with snapshot date YYYYMMDD for promotion build. */
+#define API_VERSION_BUILD 20111111
+
+#ifndef API_VERSION_NUM
+#define API_VERSION_NUM 0.0.0
+#endif /* API_VERSION_NUM */
+
+#define API_VERSION "ITT-API-Version " ITT_TO_STR(API_VERSION_NUM) " (" ITT_TO_STR(API_VERSION_BUILD) ")"
+
+/* OS communication functions */
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+typedef HMODULE lib_t;
+typedef DWORD TIDT;
+typedef CRITICAL_SECTION mutex_t;
+#define MUTEX_INITIALIZER { 0 }
+#define strong_alias(name, aliasname) /* empty for Windows */
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <dlfcn.h>
+#if defined(UNICODE) || defined(_UNICODE)
+#include <wchar.h>
+#endif /* UNICODE */
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE 1 /* need for PTHREAD_MUTEX_RECURSIVE */
+#endif /* _GNU_SOURCE */
+#include <pthread.h>
+typedef void* lib_t;
+typedef pthread_t TIDT;
+typedef pthread_mutex_t mutex_t;
+#define MUTEX_INITIALIZER PTHREAD_MUTEX_INITIALIZER
+#define _strong_alias(name, aliasname) extern __typeof (name) aliasname __attribute__ ((alias (#name)));
+#define strong_alias(name, aliasname) _strong_alias(name, aliasname)
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define __itt_get_proc(lib, name) GetProcAddress(lib, name)
+#define __itt_mutex_init(mutex) InitializeCriticalSection(mutex)
+#define __itt_mutex_lock(mutex) EnterCriticalSection(mutex)
+#define __itt_mutex_unlock(mutex) LeaveCriticalSection(mutex)
+#define __itt_load_lib(name) LoadLibraryA(name)
+#define __itt_unload_lib(handle) FreeLibrary(handle)
+#define __itt_system_error() (int)GetLastError()
+#define __itt_fstrcmp(s1, s2) lstrcmpA(s1, s2)
+#define __itt_fstrlen(s) lstrlenA(s)
+#define __itt_fstrcpyn(s1, s2, l) lstrcpynA(s1, s2, l)
+#define __itt_fstrdup(s) _strdup(s)
+#define __itt_thread_id() GetCurrentThreadId()
+#define __itt_thread_yield() SwitchToThread()
+#ifndef ITT_SIMPLE_INIT
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr)
+{
+ return InterlockedIncrement(ptr);
+}
+#endif /* ITT_SIMPLE_INIT */
+#else /* ITT_PLATFORM!=ITT_PLATFORM_WIN */
+#define __itt_get_proc(lib, name) dlsym(lib, name)
+#define __itt_mutex_init(mutex) \
+ { \
+ pthread_mutexattr_t mutex_attr; \
+ int error_code = pthread_mutexattr_init(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_init", error_code); \
+ error_code = pthread_mutexattr_settype(&mutex_attr, PTHREAD_MUTEX_RECURSIVE); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_settype", error_code); \
+ error_code = pthread_mutex_init(mutex, &mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutex_init", error_code); \
+ error_code = pthread_mutexattr_destroy(&mutex_attr); \
+ if (error_code) \
+ __itt_report_error(__itt_error_system, "pthread_mutexattr_destroy", error_code); \
+ }
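+/* Note: PTHREAD_MUTEX_RECURSIVE above makes the mutex recursive, so the same
+   thread may take __itt_mutex_lock again without deadlocking. */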
+#define __itt_mutex_lock(mutex) pthread_mutex_lock(mutex)
+#define __itt_mutex_unlock(mutex) pthread_mutex_unlock(mutex)
+#define __itt_load_lib(name) dlopen(name, RTLD_LAZY)
+#define __itt_unload_lib(handle) dlclose(handle)
+#define __itt_system_error() errno
+#define __itt_fstrcmp(s1, s2) strcmp(s1, s2)
+#define __itt_fstrlen(s) strlen(s)
+#define __itt_fstrcpyn(s1, s2, l) strncpy(s1, s2, l)
+#define __itt_fstrdup(s) strdup(s)
+#define __itt_thread_id() pthread_self()
+#define __itt_thread_yield() sched_yield()
+#if ITT_ARCH==ITT_ARCH_IA64
+#ifdef __INTEL_COMPILER
+#define __TBB_machine_fetchadd4(addr, val) __fetchadd4_acq((void *)addr, val)
+#else /* __INTEL_COMPILER */
+/* TODO: Add Support for not Intel compilers for IA64 */
+#endif /* __INTEL_COMPILER */
+#else /* ITT_ARCH!=ITT_ARCH_IA64 */
+/*ITT_INLINE int __TBB_machine_fetchadd4(volatile void* ptr, long addend) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __TBB_machine_fetchadd4(volatile void* ptr, long addend)
+{
+ int result;
+ __asm__ __volatile__("lock\nxaddl %0,%1"
+ : "=r"(result),"=m"(*(long*)ptr)
+ : "0"(addend), "m"(*(long*)ptr)
+ : "memory");
+ return result;
+}
+*/
+#endif /* ITT_ARCH==ITT_ARCH_IA64 */
+#ifndef ITT_SIMPLE_INIT
+/*ITT_INLINE int __itt_interlocked_increment(volatile long* ptr) ITT_INLINE_ATTRIBUTE;
+ITT_INLINE int __itt_interlocked_increment(volatile long* ptr)
+{
+ return __TBB_machine_fetchadd4(ptr, 1) + 1;
+}
+*/
+#endif /* ITT_SIMPLE_INIT */
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+typedef enum {
+ __itt_collection_normal = 0,
+ __itt_collection_paused = 1
+} __itt_collection_state;
+
+typedef enum {
+ __itt_thread_normal = 0,
+ __itt_thread_ignored = 1
+} __itt_thread_state;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_thread_info
+{
+ const char* nameA; /*!< Copy of original name in ASCII. */
+#if defined(UNICODE) || defined(_UNICODE)
+ const wchar_t* nameW; /*!< Copy of original name in UNICODE. */
+#else /* UNICODE || _UNICODE */
+ void* nameW;
+#endif /* UNICODE || _UNICODE */
+ TIDT tid;
+ __itt_thread_state state; /*!< Thread state (paused or normal) */
+ int extra1; /*!< Reserved to the runtime */
+ void* extra2; /*!< Reserved to the runtime */
+ struct ___itt_thread_info* next;
+} __itt_thread_info;
+
+#include "ittnotify_types.h" /* For __itt_group_id definition */
+
+typedef struct ___itt_api_info_20101001
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ __itt_group_id group;
+} __itt_api_info_20101001;
+
+typedef struct ___itt_api_info
+{
+ const char* name;
+ void** func_ptr;
+ void* init_func;
+ void* null_func;
+ __itt_group_id group;
+} __itt_api_info;
+
+struct ___itt_domain;
+struct ___itt_string_handle;
+
+typedef struct ___itt_global
+{
+ unsigned char magic[8];
+ unsigned long version_major;
+ unsigned long version_minor;
+ unsigned long version_build;
+ volatile long api_initialized;
+ volatile long mutex_initialized;
+ volatile long atomic_counter;
+ mutex_t mutex;
+ lib_t lib;
+ void* error_handler;
+ const char** dll_path_ptr;
+ __itt_api_info* api_list_ptr;
+ struct ___itt_global* next;
+ /* Joinable structures below */
+ __itt_thread_info* thread_list;
+ struct ___itt_domain* domain_list;
+ struct ___itt_string_handle* string_list;
+ __itt_collection_state state;
+} __itt_global;
+
+#pragma pack(pop)
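+/* The NEW_* helper macros below malloc a node, fill it in, and append it to the
+   corresponding singly-linked list in __itt_global (thread_list, domain_list or
+   string_list), using h_tail as the current tail of that list. */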
+
+#define NEW_THREAD_INFO_W(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = NULL; \
+ h->nameW = n ? _wcsdup(n) : NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_THREAD_INFO_A(gptr,h,h_tail,t,s,n) { \
+ h = (__itt_thread_info*)malloc(sizeof(__itt_thread_info)); \
+ if (h != NULL) { \
+ h->tid = t; \
+ h->nameA = n ? __itt_fstrdup(n) : NULL; \
+ h->nameW = NULL; \
+ h->state = s; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->thread_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_W(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = NULL; \
+ h->nameW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_DOMAIN_A(gptr,h,h_tail,name) { \
+ h = (__itt_domain*)malloc(sizeof(__itt_domain)); \
+ if (h != NULL) { \
+ h->flags = 0; /* domain is disabled by default */ \
+ h->nameA = name ? __itt_fstrdup(name) : NULL; \
+ h->nameW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->domain_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_W(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = NULL; \
+ h->strW = name ? _wcsdup(name) : NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#define NEW_STRING_HANDLE_A(gptr,h,h_tail,name) { \
+ h = (__itt_string_handle*)malloc(sizeof(__itt_string_handle)); \
+ if (h != NULL) { \
+ h->strA = name ? __itt_fstrdup(name) : NULL; \
+ h->strW = NULL; \
+ h->extra1 = 0; /* reserved */ \
+ h->extra2 = NULL; /* reserved */ \
+ h->next = NULL; \
+ if (h_tail == NULL) \
+ (gptr)->string_list = h; \
+ else \
+ h_tail->next = h; \
+ } \
+}
+
+#endif /* _ITTNOTIFY_CONFIG_H_ */
+
diff --git a/deps/v8/src/third_party/vtune/ittnotify_types.h b/deps/v8/src/third_party/vtune/ittnotify_types.h
new file mode 100644
index 000000000..736c1f5b5
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/ittnotify_types.h
@@ -0,0 +1,113 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef _ITTNOTIFY_TYPES_H_
+#define _ITTNOTIFY_TYPES_H_
+
+typedef enum ___itt_group_id
+{
+ __itt_group_none = 0,
+ __itt_group_legacy = 1<<0,
+ __itt_group_control = 1<<1,
+ __itt_group_thread = 1<<2,
+ __itt_group_mark = 1<<3,
+ __itt_group_sync = 1<<4,
+ __itt_group_fsync = 1<<5,
+ __itt_group_jit = 1<<6,
+ __itt_group_model = 1<<7,
+ __itt_group_splitter_min = 1<<7,
+ __itt_group_counter = 1<<8,
+ __itt_group_frame = 1<<9,
+ __itt_group_stitch = 1<<10,
+ __itt_group_heap = 1<<11,
+ __itt_group_splitter_max = 1<<12,
+ __itt_group_structure = 1<<12,
+ __itt_group_suppress = 1<<13,
+ __itt_group_all = -1
+} __itt_group_id;
+
+#pragma pack(push, 8)
+
+typedef struct ___itt_group_list
+{
+ __itt_group_id id;
+ const char* name;
+} __itt_group_list;
+
+#pragma pack(pop)
+
+#define ITT_GROUP_LIST(varname) \
+ static __itt_group_list varname[] = { \
+ { __itt_group_all, "all" }, \
+ { __itt_group_control, "control" }, \
+ { __itt_group_thread, "thread" }, \
+ { __itt_group_mark, "mark" }, \
+ { __itt_group_sync, "sync" }, \
+ { __itt_group_fsync, "fsync" }, \
+ { __itt_group_jit, "jit" }, \
+ { __itt_group_model, "model" }, \
+ { __itt_group_counter, "counter" }, \
+ { __itt_group_frame, "frame" }, \
+ { __itt_group_stitch, "stitch" }, \
+ { __itt_group_heap, "heap" }, \
+ { __itt_group_structure, "structure" }, \
+ { __itt_group_suppress, "suppress" }, \
+ { __itt_group_none, NULL } \
+ }
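+/* Usage sketch (hypothetical variable name): ITT_GROUP_LIST(group_list); declares a
+   static table mapping group names to __itt_group_id values, terminated by an entry
+   whose name is NULL. */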
+
+#endif /* _ITTNOTIFY_TYPES_H_ */
+
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.cc b/deps/v8/src/third_party/vtune/jitprofiling.cc
new file mode 100644
index 000000000..b3952b321
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/jitprofiling.cc
@@ -0,0 +1,499 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include "ittnotify_config.h"
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#include <windows.h>
+#pragma optimize("", off)
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <pthread.h>
+#include <dlfcn.h>
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#include <malloc.h>
+#include <stdlib.h>
+
+#include "jitprofiling.h"
+
+static const char rcsid[] = "\n@(#) $Revision: 234474 $\n";
+
+#define DLL_ENVIRONMENT_VAR "VS_PROFILER"
+
+#ifndef NEW_DLL_ENVIRONMENT_VAR
+#if ITT_ARCH==ITT_ARCH_IA32
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER32"
+#else
+#define NEW_DLL_ENVIRONMENT_VAR "INTEL_JIT_PROFILER64"
+#endif
+#endif /* NEW_DLL_ENVIRONMENT_VAR */
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+#define DEFAULT_DLLNAME "JitPI.dll"
+HINSTANCE m_libHandle = NULL;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+#define DEFAULT_DLLNAME "libJitPI.so"
+void* m_libHandle = NULL;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+/* default location of JIT profiling agent on Android */
+#define ANDROID_JIT_AGENT_PATH "/data/intel/libittnotify.so"
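+/* Lookup order used by loadiJIT_Funcs() below: the INTEL_JIT_PROFILER32/64 variable,
+   then the legacy VS_PROFILER variable, then (on Android) the path above, and finally
+   DEFAULT_DLLNAME. */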
+
+/* the function pointers */
+typedef unsigned int(*TPInitialize)(void);
+static TPInitialize FUNC_Initialize=NULL;
+
+typedef unsigned int(*TPNotify)(unsigned int, void*);
+static TPNotify FUNC_NotifyEvent=NULL;
+
+static iJIT_IsProfilingActiveFlags executionMode = iJIT_NOTHING_RUNNING;
+
+/* end collector dll part. */
+
+/* loadiJIT_Funcs(): this function is called once at the beginning and is responsible
+** for loading the functions from BistroJavaCollector.dll
+** result:
+** on success: the functions are loaded, iJIT_DLL_is_missing=0, return value = 1.
+** on failure: the functions are NULL, iJIT_DLL_is_missing=1, return value = 0.
+*/
+static int loadiJIT_Funcs(void);
+
+/* global representing whether the BistroJavaCollector can't be loaded */
+static int iJIT_DLL_is_missing = 0;
+
+/* Virtual stack - the struct is used as a virtual stack for each thread.
+** Every thread is initialized with a stack of size INIT_TOP_Stack.
+** Every method entry decreases the current stack pointer,
+** and when a thread's stack reaches its top of stack (return from the global function),
+** the top of stack and the current stack increase. Note that when returning from a function
+** the stack pointer is the address of the function return.
+*/
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+static DWORD threadLocalStorageHandle = 0;
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+static pthread_key_t threadLocalStorageHandle = (pthread_key_t)0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+#define INIT_TOP_Stack 10000
+
+typedef struct
+{
+ unsigned int TopStack;
+ unsigned int CurrentStack;
+} ThreadStack, *pThreadStack;
+
+/* end of virtual stack. */
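+/* In other words, each thread starts with TopStack == CurrentStack == INIT_TOP_Stack
+   (10000); CurrentStack counts down on method entry and back up on method exit. */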
+
+/*
+** The function for reporting virtual-machine related events to VTune.
+** Note: when reporting iJVM_EVENT_TYPE_ENTER_NIDS, there is no need to fill in the stack_id
+** field in the iJIT_Method_NIDS structure, as VTune fills it.
+**
+** The return value for iJVM_EVENT_TYPE_ENTER_NIDS and iJVM_EVENT_TYPE_LEAVE_NIDS events
+** will be 0 in case of failure.
+** For the iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED event it will be -1 if EventSpecificData == 0,
+** otherwise it will be 0.
+*/
+
+ITT_EXTERN_C int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData)
+{
+ int ReturnValue;
+
+ /*******************************************************************************
+ ** This section is for debugging outside of VTune.
+    ** It creates the environment variable that indicates call graph mode.
+    ** If running outside of VTune, remove the comment markers around the code below.
+ **
+
+ static int firstTime = 1;
+ char DoCallGraph[12] = "DoCallGraph";
+ if (firstTime)
+ {
+ firstTime = 0;
+ SetEnvironmentVariable( "BISTRO_COLLECTORS_DO_CALLGRAPH", DoCallGraph);
+ }
+
+ ** end of section.
+ *******************************************************************************/
+
+ /* initialization part - the functions have not been loaded yet. This part
+ ** will load the functions, and check if we are in Call Graph mode.
+ ** (for special treatment).
+ */
+ if (!FUNC_NotifyEvent)
+ {
+ if (iJIT_DLL_is_missing)
+ return 0;
+
+ // load the Function from the DLL
+ if (!loadiJIT_Funcs())
+ return 0;
+
+ /* Call Graph initialization. */
+ }
+
+ /* If the event is method entry/exit, check that in the current mode
+ ** VTune is allowed to receive it
+ */
+ if ((event_type == iJVM_EVENT_TYPE_ENTER_NIDS || event_type == iJVM_EVENT_TYPE_LEAVE_NIDS) &&
+ (executionMode != iJIT_CALLGRAPH_ON))
+ {
+ return 0;
+ }
+    /* This section is performed when a method entry event occurs.
+ ** It updates the virtual stack, or creates it if this is the first
+ ** method entry in the thread. The stack pointer is decreased.
+ */
+ if (event_type == iJVM_EVENT_TYPE_ENTER_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ // initialize the stack.
+ threadStack = (pThreadStack) calloc (sizeof(ThreadStack), 1);
+ threadStack->TopStack = INIT_TOP_Stack;
+ threadStack->CurrentStack = INIT_TOP_Stack;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue(threadLocalStorageHandle,(void*)threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle,(void*)threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ // decrease the stack.
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = (threadStack->CurrentStack)--;
+ }
+
+    /* This section is performed when a method leave event occurs.
+    ** It updates the virtual stack and increases the stack pointer.
+    ** If the stack pointer reaches the top (the global function was left),
+    ** both the pointer and the top pointer are increased.
+    */
+ if (event_type == iJVM_EVENT_TYPE_LEAVE_NIDS)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_NIDS) EventSpecificData)->method_id <= 999 )
+ return 0;
+
+ if (!threadStack)
+ {
+ /* Error: first report in this thread is method exit */
+ exit (1);
+ }
+
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = ++(threadStack->CurrentStack) + 1;
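+        /* The pre-increment undoes the entry-time decrement; ids that exceed
+           TopStack are reported as (unsigned int)-1 by the check below. */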
+
+ if (((piJIT_Method_NIDS) EventSpecificData)->stack_id > threadStack->TopStack)
+ ((piJIT_Method_NIDS) EventSpecificData)->stack_id = (unsigned int)-1;
+ }
+
+ if (event_type == iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED)
+ {
+ // check for use of reserved method IDs
+ if ( ((piJIT_Method_Load) EventSpecificData)->method_id <= 999 )
+ return 0;
+ }
+
+ ReturnValue = (int)FUNC_NotifyEvent(event_type, EventSpecificData);
+
+ return ReturnValue;
+}
+
+ITT_EXTERN_C void JITAPI iJIT_RegisterCallbackEx(void *userdata, iJIT_ModeChangedEx NewModeCallBackFuncEx) // The new mode call back routine
+{
+ // is it already missing... or the load of functions from the DLL failed
+ if (iJIT_DLL_is_missing || !loadiJIT_Funcs())
+ {
+ NewModeCallBackFuncEx(userdata, iJIT_NO_NOTIFICATIONS); // then do not bother with notifications
+ /* Error: could not load JIT functions. */
+ return;
+ }
+ // nothing to do with the callback
+}
+
+/*
+** This function allows the user to query in which mode, if at all, VTune is running
+*/
+ITT_EXTERN_C iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive()
+{
+ if (!iJIT_DLL_is_missing)
+ {
+ loadiJIT_Funcs();
+ }
+
+ return executionMode;
+}
+#include <stdio.h>
+/* This function loads the collector dll (BistroJavaCollector) and the relevant functions.
+** on success: all functions are loaded, iJIT_DLL_is_missing = 0, return value = 1.
+** on failure: all functions are NULL, iJIT_DLL_is_missing = 1, return value = 0.
+*/
+static int loadiJIT_Funcs()
+{
+ static int bDllWasLoaded = 0;
+ char *dllName = (char*)rcsid; // !!! Just to avoid unused code elimination !!!
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ DWORD dNameLength = 0;
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if(bDllWasLoaded)
+    {// dll was already loaded, no need to do it a second time
+ return 1;
+ }
+
+ // Assumes that the DLL will not be found
+ iJIT_DLL_is_missing = 1;
+ FUNC_NotifyEvent = NULL;
+
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ // try to get the dll name from the environment
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ dNameLength = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(NEW_DLL_ENVIRONMENT_VAR, dllName, dNameLength);
+ if (envret)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = LoadLibraryExA(dllName, NULL, LOAD_WITH_ALTERED_SEARCH_PATH);
+ }
+ free(dllName);
+ } else {
+ // Try to use old VS_PROFILER variable
+ dNameLength = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, NULL, 0);
+ if (dNameLength)
+ {
+ DWORD envret = 0;
+ dllName = (char*)malloc(sizeof(char) * (dNameLength + 1));
+ envret = GetEnvironmentVariableA(DLL_ENVIRONMENT_VAR, dllName, dNameLength);
+ if (envret)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = LoadLibraryA(dllName);
+ }
+ free(dllName);
+ }
+ }
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dllName = getenv(NEW_DLL_ENVIRONMENT_VAR);
+ if (!dllName) {
+ dllName = getenv(DLL_ENVIRONMENT_VAR);
+ }
+#ifdef ANDROID
+ if (!dllName)
+ dllName = ANDROID_JIT_AGENT_PATH;
+#endif
+ if (dllName)
+ {
+ // Try to load the dll from the PATH...
+ m_libHandle = dlopen(dllName, RTLD_LAZY);
+ }
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+
+ if (!m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ m_libHandle = LoadLibraryA(DEFAULT_DLLNAME);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = dlopen(DEFAULT_DLLNAME, RTLD_LAZY);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ // if the dll wasn't loaded - exit.
+ if (!m_libHandle)
+ {
+ iJIT_DLL_is_missing = 1; // don't try to initialize JIT agent the second time
+ return 0;
+ }
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_NotifyEvent = (TPNotify)GetProcAddress(m_libHandle, "NotifyEvent");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_NotifyEvent = reinterpret_cast<TPNotify>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "NotifyEvent")));
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_NotifyEvent)
+ {
+ FUNC_Initialize = NULL;
+ return 0;
+ }
+
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FUNC_Initialize = (TPInitialize)GetProcAddress(m_libHandle, "Initialize");
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ FUNC_Initialize = reinterpret_cast<TPInitialize>(reinterpret_cast<intptr_t>(dlsym(m_libHandle, "Initialize")));
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (!FUNC_Initialize)
+ {
+ FUNC_NotifyEvent = NULL;
+ return 0;
+ }
+
+ executionMode = (iJIT_IsProfilingActiveFlags)FUNC_Initialize();
+ if (executionMode != iJIT_SAMPLING_ON)
+ executionMode = iJIT_SAMPLING_ON;
+
+ bDllWasLoaded = 1;
+ iJIT_DLL_is_missing = 0; // DLL is ok.
+
+ /*
+ ** Call Graph mode: init the thread local storage
+ ** (need to store the virtual stack there).
+ */
+ if ( executionMode == iJIT_CALLGRAPH_ON )
+ {
+ // Allocate a thread local storage slot for the thread "stack"
+ if (!threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ threadLocalStorageHandle = TlsAlloc();
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_create(&threadLocalStorageHandle, NULL);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+
+ return 1;
+}
+
+/*
+** This function should be called by the user whenever a thread ends, to free the thread
+** "virtual stack" storage
+*/
+ITT_EXTERN_C void JITAPI FinalizeThread()
+{
+ if (threadLocalStorageHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ pThreadStack threadStack = (pThreadStack)TlsGetValue (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pThreadStack threadStack = (pThreadStack)pthread_getspecific(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ if (threadStack)
+ {
+ free (threadStack);
+ threadStack = NULL;
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsSetValue (threadLocalStorageHandle, threadStack);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_setspecific(threadLocalStorageHandle, threadStack);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ }
+ }
+}
+
+/*
+** This function should be called by the user when the process ends, to free the local
+** storage index
+*/
+ITT_EXTERN_C void JITAPI FinalizeProcess()
+{
+ if (m_libHandle)
+ {
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ FreeLibrary(m_libHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ dlclose(m_libHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ m_libHandle = NULL;
+ }
+
+ if (threadLocalStorageHandle)
+#if ITT_PLATFORM==ITT_PLATFORM_WIN
+ TlsFree (threadLocalStorageHandle);
+#else /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+ pthread_key_delete(threadLocalStorageHandle);
+#endif /* ITT_PLATFORM==ITT_PLATFORM_WIN */
+}
+
+/*
+** This function should be called by the user once for each method.
+** It returns a unique method ID; the user should maintain the ID for each
+** method.
+*/
+ITT_EXTERN_C unsigned int JITAPI iJIT_GetNewMethodID()
+{
+ static unsigned int methodID = 0x100000;
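+    /* Starting at 0x100000 keeps generated ids clear of the reserved range 0 - 999
+       that iJIT_NotifyEvent rejects. */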
+
+ if (methodID == 0)
+ return 0; // ERROR : this is not a valid value
+
+ return methodID++;
+}
+
diff --git a/deps/v8/src/third_party/vtune/jitprofiling.h b/deps/v8/src/third_party/vtune/jitprofiling.h
new file mode 100644
index 000000000..abd6d8ca7
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/jitprofiling.h
@@ -0,0 +1,298 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright (c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#ifndef __JITPROFILING_H__
+#define __JITPROFILING_H__
+
+/*
+ * Various constants used by functions
+ */
+
+/* event notification */
+typedef enum iJIT_jvm_event
+{
+
+ /* shutdown */
+
+ /*
+ * Program exiting EventSpecificData NA
+ */
+ iJVM_EVENT_TYPE_SHUTDOWN = 2,
+
+ /* JIT profiling */
+
+ /*
+     * issued after method code has been jitted into memory but before the code is executed
+ * EventSpecificData is an iJIT_Method_Load
+ */
+ iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED=13,
+
+ /* issued before unload. Method code will no longer be executed, but code
+ * and info are still in memory. The VTune profiler may capture method
+     * code only at this point. EventSpecificData is iJIT_Method_Id
+ */
+ iJVM_EVENT_TYPE_METHOD_UNLOAD_START,
+
+ /* Method Profiling */
+
+    /* method name, Id and stack are supplied
+     * issued when a method is about to be entered. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_ENTER_NIDS = 19,
+
+    /* method name, Id and stack are supplied
+     * issued when a method is about to be left. EventSpecificData is
+ * iJIT_Method_NIDS
+ */
+ iJVM_EVENT_TYPE_LEAVE_NIDS
+} iJIT_JVM_EVENT;
+
+typedef enum _iJIT_ModeFlags
+{
+ /* No need to Notify VTune, since VTune is not running */
+ iJIT_NO_NOTIFICATIONS = 0x0000,
+
+ /* when turned on the jit must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ * )
+     * for all the methods already jitted
+ */
+ iJIT_BE_NOTIFY_ON_LOAD = 0x0001,
+
+ /* when turned on the jit must call
+ * iJIT_NotifyEvent
+ * (
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_FINISHED,
+     * ) for all the methods that are unloaded
+ */
+ iJIT_BE_NOTIFY_ON_UNLOAD = 0x0002,
+
+ /* when turned on the jit must instrument all
+     * the currently jitted code with calls on
+ * method entries
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_ENTRY = 0x0004,
+
+ /* when turned on the jit must instrument all
+     * the currently jitted code with calls
+ * on method exit
+ */
+ iJIT_BE_NOTIFY_ON_METHOD_EXIT = 0x0008
+
+} iJIT_ModeFlags;
+
+
+ /* Flags used by iJIT_IsProfilingActive() */
+typedef enum _iJIT_IsProfilingActiveFlags
+{
+ /* No profiler is running. Currently not used */
+ iJIT_NOTHING_RUNNING = 0x0000,
+
+ /* Sampling is running. This is the default value
+ * returned by iJIT_IsProfilingActive()
+ */
+ iJIT_SAMPLING_ON = 0x0001,
+
+ /* Call Graph is running */
+ iJIT_CALLGRAPH_ON = 0x0002
+
+} iJIT_IsProfilingActiveFlags;
+
+/* Enumerator for the environment of methods */
+typedef enum _iJDEnvironmentType
+{
+ iJDE_JittingAPI = 2
+} iJDEnvironmentType;
+
+/**********************************
+ * Data structures for the events *
+ **********************************/
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_METHOD_UNLOAD_START
+ */
+
+typedef struct _iJIT_Method_Id
+{
+ /* Id of the method (same as the one passed in
+     * the iJIT_Method_Load struct)
+ */
+ unsigned int method_id;
+
+} *piJIT_Method_Id, iJIT_Method_Id;
+
+
+/* structure for the events:
+ * iJVM_EVENT_TYPE_ENTER_NIDS,
+ * iJVM_EVENT_TYPE_LEAVE_NIDS,
+ * iJVM_EVENT_TYPE_EXCEPTION_OCCURRED_NIDS
+ */
+
+typedef struct _iJIT_Method_NIDS
+{
+ /* unique method ID */
+ unsigned int method_id;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ unsigned int stack_id;
+
+ /* method name (just the method, without the class) */
+ char* method_name;
+} *piJIT_Method_NIDS, iJIT_Method_NIDS;
+
+/* structures for the events:
+ * iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED
+ */
+
+typedef struct _LineNumberInfo
+{
+    /* x86 Offset from the beginning of the method */
+    unsigned int Offset;
+
+    /* source line number from the beginning of the source file */
+ unsigned int LineNumber;
+
+} *pLineNumberInfo, LineNumberInfo;
+
+typedef struct _iJIT_Method_Load
+{
+    /* unique method ID - can be any unique value (except 0 - 999) */
+ unsigned int method_id;
+
+ /* method name (can be with or without the class and signature, in any case
+ * the class name will be added to it)
+ */
+ char* method_name;
+
+ /* virtual address of that method - This determines the method range for the
+ * iJVM_EVENT_TYPE_ENTER/LEAVE_METHOD_ADDR events
+ */
+ void* method_load_address;
+
+ /* Size in memory - Must be exact */
+ unsigned int method_size;
+
+ /* Line Table size in number of entries - Zero if none */
+ unsigned int line_number_size;
+
+    /* Pointer to the beginning of the line numbers info array */
+ pLineNumberInfo line_number_table;
+
+ /* unique class ID */
+ unsigned int class_id;
+
+ /* class file name */
+ char* class_file_name;
+
+ /* source file name */
+ char* source_file_name;
+
+ /* bits supplied by the user for saving in the JIT file */
+ void* user_data;
+
+ /* the size of the user data buffer */
+ unsigned int user_data_size;
+
+ /* NOTE: no need to fill this field, it's filled by VTune */
+ iJDEnvironmentType env;
+
+} *piJIT_Method_Load, iJIT_Method_Load;
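+/* Typical use (see vtune-jit.cc in this directory): obtain method_id from
+   iJIT_GetNewMethodID(), fill in the name, load address and size, then pass a
+   pointer to the struct to iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED, ...). */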
+
+/* API Functions */
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef CDECL
+# if defined WIN32 || defined _WIN32
+# define CDECL __cdecl
+# else /* defined WIN32 || defined _WIN32 */
+# if defined _M_X64 || defined _M_AMD64 || defined __x86_64__
+# define CDECL /* not actual on x86_64 platform */
+# else /* _M_X64 || _M_AMD64 || __x86_64__ */
+# define CDECL __attribute__ ((cdecl))
+# endif /* _M_X64 || _M_AMD64 || __x86_64__ */
+# endif /* defined WIN32 || defined _WIN32 */
+#endif /* CDECL */
+
+#define JITAPI CDECL
+
+/* called when the settings are changed with new settings */
+typedef void (*iJIT_ModeChangedEx)(void *UserData, iJIT_ModeFlags Flags);
+
+int JITAPI iJIT_NotifyEvent(iJIT_JVM_EVENT event_type, void *EventSpecificData);
+
+/* The new mode call back routine */
+void JITAPI iJIT_RegisterCallbackEx(void *userdata,
+ iJIT_ModeChangedEx NewModeCallBackFuncEx);
+
+iJIT_IsProfilingActiveFlags JITAPI iJIT_IsProfilingActive(void);
+
+void JITAPI FinalizeThread(void);
+
+void JITAPI FinalizeProcess(void);
+
+unsigned int JITAPI iJIT_GetNewMethodID(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* __JITPROFILING_H__ */
diff --git a/deps/v8/src/third_party/vtune/v8-vtune.h b/deps/v8/src/third_party/vtune/v8-vtune.h
new file mode 100644
index 000000000..36c11e6a1
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/v8-vtune.h
@@ -0,0 +1,69 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef V8_VTUNE_H_
+#define V8_VTUNE_H_
+
+namespace vTune {
+
+void InitilizeVtuneForV8();
+
+} // namespace vTune
+
+
+#endif // V8_VTUNE_H_
+
diff --git a/deps/v8/src/third_party/vtune/v8vtune.gyp b/deps/v8/src/third_party/vtune/v8vtune.gyp
new file mode 100644
index 000000000..cabd37ac7
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/v8vtune.gyp
@@ -0,0 +1,56 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+{
+ 'includes': ['../../../build/common.gypi'],
+ 'targets': [
+ {
+ 'target_name': 'v8_vtune',
+ 'type': 'static_library',
+ 'dependencies': [
+ '../../../tools/gyp/v8.gyp:v8',
+ ],
+ 'sources': [
+ 'ittnotify_config.h',
+ 'ittnotify_types.h',
+ 'jitprofiling.cc',
+ 'jitprofiling.h',
+ 'v8-vtune.h',
+ 'vtune-jit.cc',
+ 'vtune-jit.h',
+ ],
+ 'direct_dependent_settings': {
+ 'defines': ['ENABLE_VTUNE_JIT_INTERFACE',],
+ 'conditions': [
+ ['OS != "win"', {
+ 'libraries': ['-ldl',],
+ }],
+ ],
+ },
+ },
+ ],
+}
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.cc b/deps/v8/src/third_party/vtune/vtune-jit.cc
new file mode 100644
index 000000000..6ff595fdf
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/vtune-jit.cc
@@ -0,0 +1,279 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+*/
+#include <string.h>
+
+#ifdef WIN32
+#include <hash_map>
+using namespace std;
+#else
+// To avoid GCC 4.4 compilation warning about hash_map being deprecated.
+#define OLD_DEPRECATED __DEPRECATED
+#undef __DEPRECATED
+#include <ext/hash_map>
+#define __DEPRECATED OLD_DEPRECATED
+using namespace __gnu_cxx;
+#endif
+
+#include <list>
+
+#include "v8-vtune.h"
+#include "vtune-jit.h"
+
+namespace vTune {
+namespace internal {
+
+
+// This class is used to record the JITted code position info for JIT
+// code profiling.
+class JITCodeLineInfo {
+ public:
+ JITCodeLineInfo() { }
+
+ void SetPosition(intptr_t pc, int pos) {
+ AddCodeLineInfo(LineNumInfo(pc, pos));
+ }
+
+ struct LineNumInfo {
+ LineNumInfo(intptr_t pc, int pos)
+ : pc_(pc), pos_(pos) { }
+
+ intptr_t pc_;
+ int pos_;
+ };
+
+ std::list<LineNumInfo>* GetLineNumInfo() {
+ return &line_num_info_;
+ }
+
+ private:
+ void AddCodeLineInfo(const LineNumInfo& line_info) {
+ line_num_info_.push_back(line_info);
+ }
+ std::list<LineNumInfo> line_num_info_;
+};
+
+struct SameCodeObjects {
+ bool operator () (void* key1, void* key2) const {
+ return key1 == key2;
+ }
+};
+
+struct HashForCodeObject {
+ uint32_t operator () (void* code) const {
+ static const uintptr_t kGoldenRatio = 2654435761u;
+ uintptr_t hash = reinterpret_cast<uintptr_t>(code);
+ return static_cast<uint32_t>(hash * kGoldenRatio);
+ }
+};
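+// Multiplicative hashing: 2654435761 is a prime close to 2^32 divided by the golden
+// ratio, which spreads the (often aligned) code addresses across the hash range.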
+
+#ifdef WIN32
+typedef hash_map<void*, void*> JitInfoMap;
+#else
+typedef hash_map<void*, void*, HashForCodeObject, SameCodeObjects> JitInfoMap;
+#endif
+
+static JitInfoMap* GetEntries() {
+ static JitInfoMap* entries;
+ if (entries == NULL) {
+ entries = new JitInfoMap();
+ }
+ return entries;
+}
+
+static bool IsLineInfoTagged(void* ptr) {
+ return 0 != (reinterpret_cast<intptr_t>(ptr));
+}
+
+static JITCodeLineInfo* UntagLineInfo(void* ptr) {
+ return reinterpret_cast<JITCodeLineInfo*>(
+ reinterpret_cast<intptr_t>(ptr));
+}
+
+// The parameter str is a mixed pattern which contains the
+// function name and some other info. It comes from the various
+// Logger::CodeCreateEvent(...) functions. This function extracts the
+// pure function name from the input parameter.
+static char* GetFunctionNameFromMixedName(const char* str, int length) {
+ int index = 0;
+ int count = 0;
+ char* start_ptr = NULL;
+
+ while (str[index++] != ':' && (index < length)) {}
+
+ if (str[index] == '*' || str[index] == '~' ) index++;
+ if (index >= length) return NULL;
+
+ start_ptr = const_cast<char*>(str + index);
+
+ while (index < length && str[index++] != ' ') {
+ count++;
+ }
+
+ char* result = new char[count + 1];
+ memcpy(result, start_ptr, count);
+ result[count] = '\0';
+
+ return result;
+}
+
+// The JitCodeEventHandler for Vtune.
+void VTUNEJITInterface::event_handler(const v8::JitCodeEvent* event) {
+ if (VTUNERUNNING && event != NULL) {
+ switch (event->type) {
+ case v8::JitCodeEvent::CODE_ADDED: {
+ char* temp_file_name = NULL;
+ char* temp_method_name =
+ GetFunctionNameFromMixedName(event->name.str,
+ static_cast<int>(event->name.len));
+ iJIT_Method_Load jmethod;
+ memset(&jmethod, 0, sizeof jmethod);
+ jmethod.method_id = iJIT_GetNewMethodID();
+ jmethod.method_load_address = event->code_start;
+ jmethod.method_size = static_cast<unsigned int>(event->code_len);
+ jmethod.method_name = temp_method_name;
+
+ Handle<Script> script = event->script;
+
+ if (*script != NULL) {
+ // Get the source file name and set it to jmethod.source_file_name
+ if ((*script->GetScriptName())->IsString()) {
+ Handle<String> script_name =
+ Handle<String>(String::Cast(*script->GetScriptName()));
+ temp_file_name = new char[script_name->Length() + 1];
+ script_name->WriteAscii(temp_file_name);
+ jmethod.source_file_name = temp_file_name;
+ }
+
+ JitInfoMap::iterator entry =
+ GetEntries()->find(event->code_start);
+ if (entry != GetEntries()->end() && IsLineInfoTagged(entry->first)) {
+ JITCodeLineInfo* line_info = UntagLineInfo(entry->second);
+ // Get the line_num_info and set it to jmethod.line_number_table
+ std::list<JITCodeLineInfo::LineNumInfo>* vtunelineinfo =
+ line_info->GetLineNumInfo();
+
+ jmethod.line_number_size = (unsigned int)vtunelineinfo->size();
+ jmethod.line_number_table =
+ reinterpret_cast<LineNumberInfo*>(
+ malloc(sizeof(LineNumberInfo)*jmethod.line_number_size));
+
+ std::list<JITCodeLineInfo::LineNumInfo>::iterator Iter;
+ int index = 0;
+ for (Iter = vtunelineinfo->begin();
+ Iter != vtunelineinfo->end();
+ Iter++) {
+ jmethod.line_number_table[index].Offset =
+ static_cast<unsigned int>(Iter->pc_);
+ jmethod.line_number_table[index++].LineNumber =
+ script->GetLineNumber(Iter->pos_)+1;
+ }
+ GetEntries()->erase(event->code_start);
+ }
+ }
+
+ iJIT_NotifyEvent(iJVM_EVENT_TYPE_METHOD_LOAD_FINISHED,
+ reinterpret_cast<void*>(&jmethod));
+ if (temp_method_name)
+ delete []temp_method_name;
+ if (temp_file_name)
+ delete []temp_file_name;
+ break;
+ }
+ // TODO(chunyang.dai@intel.com): code_move will be supported.
+ case v8::JitCodeEvent::CODE_MOVED:
+ break;
+ // Currently the CODE_REMOVED event is not issued.
+ case v8::JitCodeEvent::CODE_REMOVED:
+ break;
+ case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
+ JITCodeLineInfo* line_info =
+ reinterpret_cast<JITCodeLineInfo*>(event->user_data);
+ if (line_info != NULL) {
+ line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset),
+ static_cast<int>(event->line_info.pos));
+ }
+ break;
+ }
+ case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: {
+ v8::JitCodeEvent* temp_event = const_cast<v8::JitCodeEvent*>(event);
+ temp_event->user_data = new JITCodeLineInfo();
+ break;
+ }
+ case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
+ GetEntries()->insert(std::pair <void*, void*>(event->code_start, event->user_data));
+ break;
+ }
+ default:
+ break;
+ }
+ }
+ return;
+}
+
+} // namespace internal
+
+void InitilizeVtuneForV8() {
+ if (v8::V8::Initialize()) {
+ v8::V8::SetFlagsFromString("--nocompact_code_space",
+ (int)strlen("--nocompact_code_space"));
+ v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
+ vTune::internal::VTUNEJITInterface::event_handler);
+ }
+}
+
+} // namespace vTune
diff --git a/deps/v8/src/third_party/vtune/vtune-jit.h b/deps/v8/src/third_party/vtune/vtune-jit.h
new file mode 100644
index 000000000..42b8c3da1
--- /dev/null
+++ b/deps/v8/src/third_party/vtune/vtune-jit.h
@@ -0,0 +1,82 @@
+/*
+ This file is provided under a dual BSD/GPLv2 license. When using or
+ redistributing this file, you may do so under either license.
+
+ GPL LICENSE SUMMARY
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of version 2 of the GNU General Public License as
+ published by the Free Software Foundation.
+
+ This program is distributed in the hope that it will be useful, but
+ WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+ The full GNU General Public License is included in this distribution
+ in the file called LICENSE.GPL.
+
+ Contact Information:
+ http://software.intel.com/en-us/articles/intel-vtune-amplifier-xe/
+
+ BSD LICENSE
+
+ Copyright(c) 2005-2012 Intel Corporation. All rights reserved.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef VTUNE_VTUNE_JIT_H_
+#define VTUNE_VTUNE_JIT_H_
+
+#include "jitprofiling.h"
+#include "../../../include/v8.h"
+
+#define VTUNERUNNING (iJIT_IsProfilingActive() == iJIT_SAMPLING_ON)
+
+namespace vTune {
+namespace internal {
+using namespace v8;
+class VTUNEJITInterface {
+ public:
+ static void event_handler(const v8::JitCodeEvent* event);
+
+ private:
+ //static Mutex* vtunemutex_;
+};
+
+
+} } // namespace vTune::internal
+
+
+#endif // VTUNE_VTUNE_JIT_H_
+
diff --git a/deps/v8/src/typedarray.js b/deps/v8/src/typedarray.js
index c3e3ebd66..24fcf1e45 100644
--- a/deps/v8/src/typedarray.js
+++ b/deps/v8/src/typedarray.js
@@ -27,8 +27,14 @@
"use strict";
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
var $ArrayBuffer = global.__ArrayBuffer;
+// -------------------------------------------------------------------
+
function ArrayBufferConstructor(byteLength) { // length = 1
if (%_IsConstructCall()) {
var l = TO_POSITIVE_INTEGER(byteLength);
@@ -76,15 +82,74 @@ function ArrayBufferSlice(start, end) {
return result;
}
+// --------------- Typed Arrays ---------------------
+
+function CreateTypedArrayConstructor(name, elementSize, arrayId, constructor) {
+ return function (buffer, byteOffset, length) {
+ if (%_IsConstructCall()) {
+ if (!IS_ARRAYBUFFER(buffer)) {
+ throw MakeTypeError("Type error!");
+ }
+      var offset = IS_UNDEFINED(byteOffset)
+        ? 0 : TO_POSITIVE_INTEGER(byteOffset);
+
+ if (offset % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "start offset", name, elementSize);
+ }
+ var bufferByteLength = %ArrayBufferGetByteLength(buffer);
+ if (offset >= bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_offset");
+ }
+
+ var newByteLength;
+ var newLength;
+ if (IS_UNDEFINED(length)) {
+ if (bufferByteLength % elementSize !== 0) {
+ throw MakeRangeError("invalid_typed_array_alignment",
+ "byte length", name, elementSize);
+ }
+ newByteLength = bufferByteLength - offset;
+ newLength = newByteLength / elementSize;
+ } else {
+        newLength = TO_POSITIVE_INTEGER(length);
+ newByteLength = newLength * elementSize;
+ }
+ if (newByteLength > bufferByteLength) {
+ throw MakeRangeError("invalid_typed_array_length");
+ }
+ %TypedArrayInitialize(this, arrayId, buffer, offset, newByteLength);
+ } else {
+ return new constructor(buffer, byteOffset, length);
+ }
+ }
+}
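+// The closure returned above becomes the body of each typed array constructor
+// installed by SetupTypedArray below; for example, new Int32Array(buffer, 8, 4)
+// checks that the byte offset is a multiple of the element size and that the view
+// fits inside the buffer before calling %TypedArrayInitialize.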
+
+function TypedArrayGetBuffer() {
+ return %TypedArrayGetBuffer(this);
+}
+
+function TypedArrayGetByteLength() {
+ return %TypedArrayGetByteLength(this);
+}
+
+function TypedArrayGetByteOffset() {
+ return %TypedArrayGetByteOffset(this);
+}
+
+function TypedArrayGetLength() {
+ return %TypedArrayGetLength(this);
+}
// -------------------------------------------------------------------
-(function () {
+function SetUpArrayBuffer() {
%CheckIsBootstrapping();
- // Set up the Uint16Array constructor function.
+ // Set up the ArrayBuffer constructor function.
%SetCode($ArrayBuffer, ArrayBufferConstructor);
+ %FunctionSetPrototype($ArrayBuffer, new $Object());
// Set up the constructor property on the ArrayBuffer prototype object.
%SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
@@ -94,5 +159,34 @@ function ArrayBufferSlice(start, end) {
InstallFunctions($ArrayBuffer.prototype, DONT_ENUM, $Array(
"slice", ArrayBufferSlice
));
+}
+
+SetUpArrayBuffer();
+
+function SetupTypedArray(arrayId, name, constructor, elementSize) {
+ var f = CreateTypedArrayConstructor(name, elementSize,
+ arrayId, constructor);
+ %SetCode(constructor, f);
+ %FunctionSetPrototype(constructor, new $Object());
+
+ %SetProperty(constructor.prototype,
+ "constructor", constructor, DONT_ENUM);
+ %SetProperty(constructor.prototype,
+ "BYTES_PER_ELEMENT", elementSize,
+ READ_ONLY | DONT_ENUM | DONT_DELETE);
+ InstallGetter(constructor.prototype, "buffer", TypedArrayGetBuffer);
+ InstallGetter(constructor.prototype, "byteOffset", TypedArrayGetByteOffset);
+ InstallGetter(constructor.prototype, "byteLength", TypedArrayGetByteLength);
+ InstallGetter(constructor.prototype, "length", TypedArrayGetLength);
+}
+
+// arrayIds below should be synchronized with Runtime_TypedArrayInitialize.
+SetupTypedArray(1, "Uint8Array", global.__Uint8Array, 1);
+SetupTypedArray(2, "Int8Array", global.__Int8Array, 1);
+SetupTypedArray(3, "Uint16Array", global.__Uint16Array, 2);
+SetupTypedArray(4, "Int16Array", global.__Int16Array, 2);
+SetupTypedArray(5, "Uint32Array", global.__Uint32Array, 4);
+SetupTypedArray(6, "Int32Array", global.__Int32Array, 4);
+SetupTypedArray(7, "Float32Array", global.__Float32Array, 4);
+SetupTypedArray(8, "Float64Array", global.__Float64Array, 8);
-})();
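
The constructor factory above enforces three constraints before calling %TypedArrayInitialize: the byte offset must be a multiple of the element size, the offset must lie inside the buffer, and the resulting byte length must fit in the buffer. A standalone C++ sketch of the same validation arithmetic (hypothetical helper, not V8 code; error messages are illustrative):

    // Hypothetical sketch of the typed-array view checks performed above.
    #include <cstddef>
    #include <stdexcept>

    struct ViewLayout {
      size_t byte_offset;
      size_t byte_length;
      size_t length;  // element count
    };

    ViewLayout ComputeViewLayout(size_t buffer_byte_length, size_t byte_offset,
                                 size_t element_size, bool has_length,
                                 size_t length) {
      if (byte_offset % element_size != 0)
        throw std::range_error("start offset not aligned to element size");
      if (byte_offset >= buffer_byte_length)
        throw std::range_error("start offset outside buffer");

      size_t byte_length;
      if (!has_length) {
        // View covers the rest of the buffer; it must divide evenly.
        if (buffer_byte_length % element_size != 0)
          throw std::range_error("buffer length not a multiple of element size");
        byte_length = buffer_byte_length - byte_offset;
      } else {
        byte_length = length * element_size;
      }
      if (byte_offset + byte_length > buffer_byte_length)
        throw std::range_error("view does not fit in buffer");
      return ViewLayout{byte_offset, byte_length, byte_length / element_size};
    }
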
diff --git a/deps/v8/src/unicode-inl.h b/deps/v8/src/unicode-inl.h
index 02be45768..f861f9f2d 100644
--- a/deps/v8/src/unicode-inl.h
+++ b/deps/v8/src/unicode-inl.h
@@ -30,6 +30,7 @@
#include "unicode.h"
#include "checks.h"
+#include "platform.h"
namespace unibrow {
@@ -202,7 +203,7 @@ unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
unsigned buffer_length =
last_byte_of_buffer_unused_ ? kBufferSize - 1 : kBufferSize;
unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
- memcpy(data, buffer_, memcpy_length*sizeof(uint16_t));
+ v8::internal::OS::MemCopy(data, buffer_, memcpy_length*sizeof(uint16_t));
if (length <= buffer_length) return length;
ASSERT(unbuffered_start_ != NULL);
// Copy the rest the slow way.
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 1de22f8ae..4e3f084af 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -25,11 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
// This file contains support for URI manipulations written in
// JavaScript.
-// Expect $String = global.String;
-
// Lazily initialized.
var hexCharArray = 0;
var hexCharCodeArray = 0;
@@ -437,6 +441,7 @@ function URIUnescape(str) {
function SetUpUri() {
%CheckIsBootstrapping();
+
// Set up non-enumerable URI functions on the global object and set
// their names.
InstallFunctions(global, DONT_ENUM, $Array(
diff --git a/deps/v8/src/utils.cc b/deps/v8/src/utils.cc
index 7e8c088dd..846261520 100644
--- a/deps/v8/src/utils.cc
+++ b/deps/v8/src/utils.cc
@@ -28,6 +28,7 @@
#include <stdarg.h>
#include "../include/v8stdint.h"
#include "checks.h"
+#include "platform.h"
#include "utils.h"
namespace v8 {
@@ -46,9 +47,9 @@ void SimpleStringBuilder::AddString(const char* s) {
void SimpleStringBuilder::AddSubstring(const char* s, int n) {
- ASSERT(!is_finalized() && position_ + n < buffer_.length());
+ ASSERT(!is_finalized() && position_ + n <= buffer_.length());
ASSERT(static_cast<size_t>(n) <= strlen(s));
- memcpy(&buffer_[position_], s, n * kCharSize);
+ OS::MemCopy(&buffer_[position_], s, n * kCharSize);
position_ += n;
}
@@ -79,7 +80,13 @@ void SimpleStringBuilder::AddDecimalInteger(int32_t value) {
char* SimpleStringBuilder::Finalize() {
- ASSERT(!is_finalized() && position_ < buffer_.length());
+ ASSERT(!is_finalized() && position_ <= buffer_.length());
+ // If there is no space for null termination, overwrite last character.
+ if (position_ == buffer_.length()) {
+ position_--;
+ // Print ellipsis.
+ for (int i = 3; i > 0 && position_ > i; --i) buffer_[position_ - i] = '.';
+ }
buffer_[position_] = '\0';
// Make sure nobody managed to add a 0-character to the
// buffer while building the string.
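
The new Finalize() path handles a builder that has exactly filled its buffer: rather than asserting, it gives up the last character(s) to make room for the terminating NUL and prints an ellipsis to signal truncation. A minimal standalone sketch of that policy (hypothetical function, not the V8 class):

    // Hypothetical sketch of "truncate with ellipsis" for a fixed-size buffer.
    #include <cstdio>
    #include <cstring>

    // Copies src into buf (capacity cap, including the NUL). If src does not
    // fit, the copy is cut short and the final characters become "...".
    void CopyWithEllipsis(char* buf, size_t cap, const char* src) {
      size_t len = strlen(src);
      if (len < cap) {
        memcpy(buf, src, len + 1);
        return;
      }
      size_t pos = cap - 1;  // last writable index, reserved for '\0'
      memcpy(buf, src, pos);
      for (size_t i = 3; i > 0 && pos > i; --i) buf[pos - i] = '.';
      buf[pos] = '\0';
    }

    int main() {
      char buf[8];
      CopyWithEllipsis(buf, sizeof(buf), "Hello, world");
      printf("%s\n", buf);  // prints "Hell..."
      return 0;
    }
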
diff --git a/deps/v8/src/utils.h b/deps/v8/src/utils.h
index c391b9c43..b84d59238 100644
--- a/deps/v8/src/utils.h
+++ b/deps/v8/src/utils.h
@@ -32,9 +32,9 @@
#include <string.h>
#include <climits>
-#include "globals.h"
-#include "checks.h"
#include "allocation.h"
+#include "checks.h"
+#include "globals.h"
namespace v8 {
namespace internal {
@@ -494,6 +494,7 @@ class EmbeddedVector : public Vector<T> {
// When copying, make underlying Vector to reference our buffer.
EmbeddedVector(const EmbeddedVector& rhs)
: Vector<T>(rhs) {
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
set_start(buffer_);
}
@@ -501,6 +502,7 @@ class EmbeddedVector : public Vector<T> {
EmbeddedVector& operator=(const EmbeddedVector& rhs) {
if (this == &rhs) return *this;
Vector<T>::operator=(rhs);
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(buffer_, rhs.buffer_, sizeof(T) * kSize);
this->set_start(buffer_);
return *this;
@@ -876,6 +878,7 @@ struct BitCastHelper {
INLINE(static Dest cast(const Source& source)) {
Dest dest;
+ // TODO(jkummerow): Refactor #includes and use OS::MemCopy() instead.
memcpy(&dest, &source, sizeof(dest));
return dest;
}
@@ -1019,6 +1022,9 @@ class EnumSet {
void Intersect(const EnumSet& set) { bits_ &= set.bits_; }
T ToIntegral() const { return bits_; }
bool operator==(const EnumSet& set) { return bits_ == set.bits_; }
+ EnumSet<E, T> operator|(const EnumSet& set) const {
+ return EnumSet<E, T>(bits_ | set.bits_);
+ }
private:
T Mask(E element) const {
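
The added operator| gives EnumSet a non-mutating union, so two feature sets can be combined in an expression without touching either operand. A self-contained sketch of the idea (type and names are illustrative, not the V8 template):

    // Hypothetical minimal EnumSet-style bitset with a non-mutating union.
    #include <cassert>
    #include <cstdint>

    template <class E, class T = uint32_t>
    class SmallEnumSet {
     public:
      explicit SmallEnumSet(T bits = 0) : bits_(bits) {}
      void Add(E e) { bits_ |= Mask(e); }
      bool Contains(E e) const { return (bits_ & Mask(e)) != 0; }
      // Union without mutation, mirroring the operator| added above.
      SmallEnumSet operator|(const SmallEnumSet& other) const {
        return SmallEnumSet(bits_ | other.bits_);
      }
     private:
      static T Mask(E e) { return static_cast<T>(1) << static_cast<int>(e); }
      T bits_;
    };

    enum Feature { SSE3, SSE4_1, CMOV };

    int main() {
      SmallEnumSet<Feature> a, b;
      a.Add(SSE3);
      b.Add(CMOV);
      SmallEnumSet<Feature> both = a | b;
      assert(both.Contains(SSE3) && both.Contains(CMOV));
      return 0;
    }
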
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index 2e8cd50e6..274128ed4 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -37,10 +37,10 @@
#include "heap-profiler.h"
#include "hydrogen.h"
#include "lithium-allocator.h"
-#include "log.h"
#include "objects.h"
#include "once.h"
#include "platform.h"
+#include "sampler.h"
#include "runtime-profiler.h"
#include "serialize.h"
#include "store-buffer.h"
@@ -123,6 +123,7 @@ void V8::TearDown() {
delete call_completed_callbacks_;
call_completed_callbacks_ = NULL;
+ Sampler::TearDown();
OS::TearDown();
}
@@ -270,16 +271,15 @@ void V8::InitializeOncePerProcessImpl() {
}
if (FLAG_trace_hydrogen) FLAG_parallel_recompilation = false;
OS::SetUp();
+ Sampler::SetUp();
CPU::SetUp();
use_crankshaft_ = FLAG_crankshaft
&& !Serializer::enabled()
&& CPU::SupportsCrankshaft();
OS::PostSetUp();
- RuntimeProfiler::GlobalSetUp();
ElementsAccessor::InitializeOncePerProcess();
LOperand::SetUpCaches();
SetUpJSCallerSavedCodeData();
- SamplerRegistry::SetUp();
ExternalReference::SetUp();
}
diff --git a/deps/v8/src/v8conversions.h b/deps/v8/src/v8conversions.h
index 0147d8c37..9d618af98 100644
--- a/deps/v8/src/v8conversions.h
+++ b/deps/v8/src/v8conversions.h
@@ -55,6 +55,19 @@ double StringToDouble(UnicodeCache* unicode_cache,
// Converts a string into an integer.
double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
+// Converts a number into size_t.
+inline size_t NumberToSize(Isolate* isolate,
+ Object* number) {
+ NoHandleAllocation hc(isolate);
+ if (number->IsSmi()) {
+ return Smi::cast(number)->value();
+ } else {
+ ASSERT(number->IsHeapNumber());
+ double value = HeapNumber::cast(number)->value();
+ return static_cast<size_t>(value);
+ }
+}
+
} } // namespace v8::internal
#endif // V8_V8CONVERSIONS_H_
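
NumberToSize distinguishes the two tagged shapes a JS number can take inside V8: a small integer (Smi) whose value is read directly, and a heap-allocated double that is truncated to size_t. Stripped of the tagging, the logic reduces to picking an integral or a floating-point path; a hypothetical standalone sketch:

    // Hypothetical sketch of the Smi-vs-HeapNumber split in NumberToSize:
    // integers are used as-is, doubles are truncated toward zero.
    #include <cstddef>
    #include <cstdio>

    struct Number {
      bool is_int;   // stands in for "is a Smi"
      long long i;   // integer payload
      double d;      // heap-number payload
    };

    size_t NumberToSize(const Number& n) {
      if (n.is_int) return static_cast<size_t>(n.i);
      return static_cast<size_t>(n.d);  // drops the fractional part
    }

    int main() {
      Number a = {true, 42, 0.0};
      Number b = {false, 0, 1000000.75};
      printf("%zu %zu\n", NumberToSize(a), NumberToSize(b));  // 42 1000000
      return 0;
    }
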
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 4b4d312f9..82e30f504 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -163,7 +163,6 @@ class RelocInfo;
class Deserializer;
class MessageLocation;
class ObjectGroup;
-class TickSample;
class VirtualMemory;
class Mutex;
@@ -433,11 +432,10 @@ enum CpuFeature { SSE4_1 = 32 + 19, // x86
CPUID = 10, // x86
VFP3 = 1, // ARM
ARMv7 = 2, // ARM
- VFP2 = 3, // ARM
- SUDIV = 4, // ARM
- UNALIGNED_ACCESSES = 5, // ARM
- MOVW_MOVT_IMMEDIATE_LOADS = 6, // ARM
- VFP32DREGS = 7, // ARM
+ SUDIV = 3, // ARM
+ UNALIGNED_ACCESSES = 4, // ARM
+ MOVW_MOVT_IMMEDIATE_LOADS = 5, // ARM
+ VFP32DREGS = 6, // ARM
SAHF = 0, // x86
FPU = 1}; // MIPS
@@ -496,8 +494,8 @@ enum VariableMode {
INTERNAL, // like VAR, but not user-visible (may or may not
// be in a context)
- TEMPORARY, // temporary variables (not user-visible), never
- // in a context
+ TEMPORARY, // temporary variables (not user-visible), stack-allocated
+ // unless the scope as a whole has forced context allocation
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index d9dc09651..83b561859 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -26,7 +26,6 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file relies on the fact that the following declarations have been made
-//
// in runtime.js:
// var $Object = global.Object;
// var $Boolean = global.Boolean;
@@ -43,7 +42,6 @@ var $isFinite = GlobalIsFinite;
// ----------------------------------------------------------------------------
-
// Helper function used to install functions on objects.
function InstallFunctions(object, attributes, functions) {
if (functions.length >= 8) {
@@ -61,7 +59,7 @@ function InstallFunctions(object, attributes, functions) {
}
-// Helper function to install a getter only property.
+// Helper function to install a getter-only accessor property.
function InstallGetter(object, name, getter) {
%FunctionSetName(getter, name);
%FunctionRemovePrototype(getter);
@@ -70,6 +68,18 @@ function InstallGetter(object, name, getter) {
}
+// Helper function to install a getter/setter accessor property.
+function InstallGetterSetter(object, name, getter, setter) {
+ %FunctionSetName(getter, name);
+ %FunctionSetName(setter, name);
+ %FunctionRemovePrototype(getter);
+ %FunctionRemovePrototype(setter);
+ %DefineOrRedefineAccessorProperty(object, name, getter, setter, DONT_ENUM);
+ %SetNativeFlag(getter);
+ %SetNativeFlag(setter);
+}
+
+
// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
@@ -186,6 +196,7 @@ function GlobalEval(x) {
// Set up global object.
function SetUpGlobal() {
%CheckIsBootstrapping();
+
// ECMA 262 - 15.1.1.1.
%SetProperty(global, "NaN", $NaN, DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -208,27 +219,10 @@ function SetUpGlobal() {
SetUpGlobal();
-// ----------------------------------------------------------------------------
-// Boolean (first part of definition)
-
-
-%SetCode($Boolean, function(x) {
- if (%_IsConstructCall()) {
- %_SetValueOf(this, ToBoolean(x));
- } else {
- return ToBoolean(x);
- }
-});
-
-%FunctionSetPrototype($Boolean, new $Boolean(false));
-
-%SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
// ----------------------------------------------------------------------------
// Object
-$Object.prototype.constructor = $Object;
-
// ECMA-262 - 15.2.4.2
function ObjectToString() {
if (IS_UNDEFINED(this) && !IS_UNDETECTABLE(this)) return "[object Undefined]";
@@ -395,7 +389,8 @@ function FromPropertyDescriptor(desc) {
}
// Must be an AccessorDescriptor then. We never return a generic descriptor.
return { get: desc.getGet(),
- set: desc.getSet(),
+ set: desc.getSet() === ObjectSetProto ? ObjectPoisonProto
+ : desc.getSet(),
enumerable: desc.isEnumerable(),
configurable: desc.isConfigurable() };
}
@@ -937,7 +932,7 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
// Step 4 - Special handling for array index.
var index = ToUint32(p);
- if (index == ToNumber(p) && index != 4294967295) {
+ if (ToString(index) == p && index != 4294967295) {
var length = obj.length;
var length_desc = GetOwnProperty(obj, "length");
if ((index >= length && !length_desc.isWritable()) ||
@@ -1326,7 +1321,25 @@ function ObjectIs(obj1, obj2) {
}
-%SetCode($Object, function(x) {
+// Harmony __proto__ getter.
+function ObjectGetProto() {
+ return %GetPrototype(this);
+}
+
+
+// Harmony __proto__ setter.
+function ObjectSetProto(obj) {
+ return %SetPrototype(this, obj);
+}
+
+
+// Harmony __proto__ poison pill.
+function ObjectPoisonProto(obj) {
+ throw MakeTypeError("proto_poison_pill", []);
+}
+
+
+function ObjectConstructor(x) {
if (%_IsConstructCall()) {
if (x == null) return this;
return ToObject(x);
@@ -1334,16 +1347,23 @@ function ObjectIs(obj1, obj2) {
if (x == null) return { };
return ToObject(x);
}
-});
+}
-%SetExpectedNumberOfProperties($Object, 4);
// ----------------------------------------------------------------------------
// Object
function SetUpObject() {
%CheckIsBootstrapping();
- // Set Up non-enumerable functions on the Object.prototype object.
+
+ %SetCode($Object, ObjectConstructor);
+ %FunctionSetName(ObjectPoisonProto, "__proto__");
+ %FunctionRemovePrototype(ObjectPoisonProto);
+ %SetExpectedNumberOfProperties($Object, 4);
+
+ %SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
+
+ // Set up non-enumerable functions on the Object.prototype object.
InstallFunctions($Object.prototype, DONT_ENUM, $Array(
"toString", ObjectToString,
"toLocaleString", ObjectToLocaleString,
@@ -1356,6 +1376,10 @@ function SetUpObject() {
"__defineSetter__", ObjectDefineSetter,
"__lookupSetter__", ObjectLookupSetter
));
+ InstallGetterSetter($Object.prototype, "__proto__",
+ ObjectGetProto, ObjectSetProto);
+
+ // Set up non-enumerable functions in the Object object.
InstallFunctions($Object, DONT_ENUM, $Array(
"keys", ObjectKeys,
"create", ObjectCreate,
@@ -1376,9 +1400,19 @@ function SetUpObject() {
SetUpObject();
+
// ----------------------------------------------------------------------------
// Boolean
+function BooleanConstructor(x) {
+ if (%_IsConstructCall()) {
+ %_SetValueOf(this, ToBoolean(x));
+ } else {
+ return ToBoolean(x);
+ }
+}
+
+
function BooleanToString() {
// NOTE: Both Boolean objects and values can enter here as
// 'this'. This is not as dictated by ECMA-262.
@@ -1405,9 +1439,13 @@ function BooleanValueOf() {
// ----------------------------------------------------------------------------
-
function SetUpBoolean () {
%CheckIsBootstrapping();
+
+ %SetCode($Boolean, BooleanConstructor);
+ %FunctionSetPrototype($Boolean, new $Boolean(false));
+ %SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
+
InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
"toString", BooleanToString,
"valueOf", BooleanValueOf
@@ -1420,17 +1458,15 @@ SetUpBoolean();
// ----------------------------------------------------------------------------
// Number
-// Set the Number function and constructor.
-%SetCode($Number, function(x) {
+function NumberConstructor(x) {
var value = %_ArgumentsLength() == 0 ? 0 : ToNumber(x);
if (%_IsConstructCall()) {
%_SetValueOf(this, value);
} else {
return value;
}
-});
+}
-%FunctionSetPrototype($Number, new $Number(0));
// ECMA-262 section 15.7.4.2.
function NumberToString(radix) {
@@ -1568,6 +1604,10 @@ function NumberIsNaN(number) {
function SetUpNumber() {
%CheckIsBootstrapping();
+
+ %SetCode($Number, NumberConstructor);
+ %FunctionSetPrototype($Number, new $Number(0));
+
%OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
// Set up the constructor property on the Number prototype object.
%SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
@@ -1620,13 +1660,12 @@ SetUpNumber();
// ----------------------------------------------------------------------------
// Function
-$Function.prototype.constructor = $Function;
-
function FunctionSourceString(func) {
while (%IsJSFunctionProxy(func)) {
func = %GetCallTrap(func);
}
+ // TODO(wingo): Print source using function* for generators.
if (!IS_FUNCTION(func)) {
throw new $TypeError('Function.prototype.toString is not generic');
}
@@ -1745,12 +1784,15 @@ function NewFunction(arg1) { // length == 1
return %SetNewFunctionAttributes(f);
}
-%SetCode($Function, NewFunction);
// ----------------------------------------------------------------------------
function SetUpFunction() {
%CheckIsBootstrapping();
+
+ %SetCode($Function, NewFunction);
+ %SetProperty($Function.prototype, "constructor", $Function, DONT_ENUM);
+
InstallFunctions($Function.prototype, DONT_ENUM, $Array(
"bind", FunctionBind,
"toString", FunctionToString
diff --git a/deps/v8/src/v8utils.cc b/deps/v8/src/v8utils.cc
index 58ad4e5ed..7390d854e 100644
--- a/deps/v8/src/v8utils.cc
+++ b/deps/v8/src/v8utils.cc
@@ -105,12 +105,12 @@ char* ReadLine(const char* prompt) {
char* new_result = NewArray<char>(new_len);
// Copy the existing input into the new array and set the new
// array as the result.
- memcpy(new_result, result, offset * kCharSize);
+ OS::MemCopy(new_result, result, offset * kCharSize);
DeleteArray(result);
result = new_result;
}
// Copy the newly read line into the result.
- memcpy(result + offset, line_buf, len * kCharSize);
+ OS::MemCopy(result + offset, line_buf, len * kCharSize);
offset += len;
}
ASSERT(result != NULL);
@@ -264,7 +264,7 @@ void StringBuilder::AddFormatted(const char* format, ...) {
void StringBuilder::AddFormattedList(const char* format, va_list list) {
- ASSERT(!is_finalized() && position_ < buffer_.length());
+ ASSERT(!is_finalized() && position_ <= buffer_.length());
int n = OS::VSNPrintF(buffer_ + position_, format, list);
if (n < 0 || n >= (buffer_.length() - position_)) {
position_ = buffer_.length();
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index b5c8f084e..8661f9b88 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -125,49 +125,126 @@ inline Vector< Handle<Object> > HandleVector(v8::internal::Handle<T>* elms,
// ----------------------------------------------------------------------------
// Memory
-// Copies data from |src| to |dst|. The data spans must not overlap.
+// Copies words from |src| to |dst|. The data spans must not overlap.
template <typename T>
-inline void CopyWords(T* dst, T* src, int num_words) {
+inline void CopyWords(T* dst, const T* src, size_t num_words) {
STATIC_ASSERT(sizeof(T) == kPointerSize);
- ASSERT(Min(dst, src) + num_words <= Max(dst, src));
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
+ Max(dst, const_cast<T*>(src)));
ASSERT(num_words > 0);
// Use block copying OS::MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
- static const int kBlockCopyLimit = 16;
- STATIC_ASSERT(kBlockCopyLimit * kPointerSize >= OS::kMinComplexMemCopy);
+ static const size_t kBlockCopyLimit = 16;
- if (num_words >= kBlockCopyLimit) {
- OS::MemCopy(dst, src, num_words * kPointerSize);
+ if (num_words < kBlockCopyLimit) {
+ do {
+ num_words--;
+ *dst++ = *src++;
+ } while (num_words > 0);
} else {
- int remaining = num_words;
+ OS::MemCopy(dst, src, num_words * kPointerSize);
+ }
+}
+
+
+// Copies words from |src| to |dst|. No restrictions.
+template <typename T>
+inline void MoveWords(T* dst, const T* src, size_t num_words) {
+ STATIC_ASSERT(sizeof(T) == kPointerSize);
+ ASSERT(num_words > 0);
+
+ // Use block copying OS::MemCopy if the segment we're copying is
+ // enough to justify the extra call/setup overhead.
+ static const size_t kBlockCopyLimit = 16;
+
+ if (num_words < kBlockCopyLimit &&
+ ((dst < src) || (dst >= (src + num_words * kPointerSize)))) {
+ T* end = dst + num_words;
do {
- remaining--;
+ num_words--;
*dst++ = *src++;
- } while (remaining > 0);
+ } while (num_words > 0);
+ } else {
+ OS::MemMove(dst, src, num_words * kPointerSize);
}
}
// Copies data from |src| to |dst|. The data spans must not overlap.
template <typename T>
-inline void CopyBytes(T* dst, T* src, size_t num_bytes) {
+inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
STATIC_ASSERT(sizeof(T) == 1);
- ASSERT(Min(dst, src) + num_bytes <= Max(dst, src));
+ ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
+ Max(dst, const_cast<T*>(src)));
if (num_bytes == 0) return;
// Use block copying OS::MemCopy if the segment we're copying is
// enough to justify the extra call/setup overhead.
static const int kBlockCopyLimit = OS::kMinComplexMemCopy;
- if (num_bytes >= static_cast<size_t>(kBlockCopyLimit)) {
- OS::MemCopy(dst, src, num_bytes);
- } else {
- size_t remaining = num_bytes;
+ if (num_bytes < static_cast<size_t>(kBlockCopyLimit)) {
do {
- remaining--;
+ num_bytes--;
*dst++ = *src++;
- } while (remaining > 0);
+ } while (num_bytes > 0);
+ } else {
+ OS::MemCopy(dst, src, num_bytes);
+ }
+}
+
+
+// Copies data from |src| to |dst|. No restrictions.
+template <typename T>
+inline void MoveBytes(T* dst, const T* src, size_t num_bytes) {
+ STATIC_ASSERT(sizeof(T) == 1);
+ switch (num_bytes) {
+ case 0: return;
+ case 1:
+ *dst = *src;
+ return;
+#ifdef V8_HOST_CAN_READ_UNALIGNED
+ case 2:
+ *reinterpret_cast<uint16_t*>(dst) = *reinterpret_cast<const uint16_t*>(src);
+ return;
+ case 3: {
+ uint16_t part1 = *reinterpret_cast<const uint16_t*>(src);
+ byte part2 = *(src + 2);
+ *reinterpret_cast<uint16_t*>(dst) = part1;
+ *(dst + 2) = part2;
+ return;
+ }
+ case 4:
+ *reinterpret_cast<uint32_t*>(dst) = *reinterpret_cast<const uint32_t*>(src);
+ return;
+ case 5:
+ case 6:
+ case 7:
+ case 8: {
+ uint32_t part1 = *reinterpret_cast<const uint32_t*>(src);
+ uint32_t part2 = *reinterpret_cast<const uint32_t*>(src + num_bytes - 4);
+ *reinterpret_cast<uint32_t*>(dst) = part1;
+ *reinterpret_cast<uint32_t*>(dst + num_bytes - 4) = part2;
+ return;
+ }
+ case 9:
+ case 10:
+ case 11:
+ case 12:
+ case 13:
+ case 14:
+ case 15:
+ case 16: {
+ double part1 = *reinterpret_cast<const double*>(src);
+ double part2 = *reinterpret_cast<const double*>(src + num_bytes - 8);
+ *reinterpret_cast<double*>(dst) = part1;
+ *reinterpret_cast<double*>(dst + num_bytes - 8) = part2;
+ return;
+ }
+#endif
+ default:
+ OS::MemMove(dst, src, num_bytes);
+ return;
}
}
@@ -185,6 +262,13 @@ inline void MemsetPointer(T** dest, U* value, int counter) {
#elif defined(V8_HOST_ARCH_X64)
#define STOS "stosq"
#endif
+#if defined(__native_client__)
+ // This STOS sequence does not validate for x86_64 Native Client.
+ // Here we #undef STOS to force use of the slower C version.
+ // TODO(bradchen): Profile V8 and implement a faster REP STOS
+ // here if the profile indicates it matters.
+#undef STOS
+#endif
#if defined(__GNUC__) && defined(STOS)
asm volatile(
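
The copy helpers above share one pattern: below a small threshold the call/setup cost of OS::MemCopy or OS::MemMove outweighs the work, so the bytes or words are copied in an inline loop; only longer spans dispatch to the library routine. MoveBytes additionally handles 5 to 16 byte moves with the classic overlapping first/last word trick. A simplified standalone sketch of both ideas (the threshold and names are illustrative):

    // Hypothetical sketch of threshold-based copying, as in CopyBytes/MoveBytes.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static const size_t kInlineCopyLimit = 16;  // illustrative threshold

    void SmallAwareCopy(uint8_t* dst, const uint8_t* src, size_t n) {
      if (n < kInlineCopyLimit) {
        // Tiny spans: a simple loop beats the call/setup cost of memcpy.
        for (size_t i = 0; i < n; ++i) dst[i] = src[i];
      } else {
        memcpy(dst, src, n);  // long enough to amortize the library call
      }
    }

    // Overlapping-word trick for 8..16 bytes: load the first and last 8 bytes
    // (the loads may overlap in the middle), then store both; every byte of
    // the destination is covered with just two loads and two stores.
    void Move8to16(uint8_t* dst, const uint8_t* src, size_t n) {
      uint64_t head, tail;
      memcpy(&head, src, 8);
      memcpy(&tail, src + n - 8, 8);
      memcpy(dst, &head, 8);
      memcpy(dst + n - 8, &tail, 8);
    }

    int main() {
      uint8_t a[16] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16};
      uint8_t b[16] = {0};
      SmallAwareCopy(b, a, 7);
      Move8to16(b, a, 12);
      printf("%d %d\n", b[0], b[11]);  // 1 12
      return 0;
    }
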
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 64ed98d46..933e2540a 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,8 +33,8 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 17
-#define BUILD_NUMBER 16
+#define MINOR_VERSION 18
+#define BUILD_NUMBER 0
#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index 57d40f794..25979f936 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -150,9 +150,8 @@ void CpuFeatures::Probe() {
found_by_runtime_probing_only_
= probed_features & ~kDefaultCpuFeatures & ~platform_features;
- // SSE2 and CMOV must be available on an X64 CPU.
+ // CMOV must be available on an X64 CPU.
ASSERT(IsSupported(CPUID));
- ASSERT(IsSupported(SSE2));
ASSERT(IsSupported(CMOV));
delete memory;
@@ -486,9 +485,9 @@ void Assembler::GrowBuffer() {
intptr_t pc_delta = desc.buffer - buffer_;
intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
(buffer_ + buffer_size_);
- memmove(desc.buffer, buffer_, desc.instr_size);
- memmove(rc_delta + reloc_info_writer.pos(),
- reloc_info_writer.pos(), desc.reloc_size);
+ OS::MemMove(desc.buffer, buffer_, desc.instr_size);
+ OS::MemMove(rc_delta + reloc_info_writer.pos(),
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate() != NULL &&
@@ -2602,6 +2601,26 @@ void Assembler::movdqa(XMMRegister dst, const Operand& src) {
}
+void Assembler::movdqu(const Operand& dst, XMMRegister src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(src, dst);
+ emit(0x0F);
+ emit(0x7F);
+ emit_sse_operand(src, dst);
+}
+
+
+void Assembler::movdqu(XMMRegister dst, const Operand& src) {
+ EnsureSpace ensure_space(this);
+ emit(0xF3);
+ emit_rex_64(dst, src);
+ emit(0x0F);
+ emit(0x6F);
+ emit_sse_operand(dst, src);
+}
+
+
void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
ASSERT(CpuFeatures::IsSupported(SSE4_1));
ASSERT(is_uint8(imm8));
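
Both new movdqu overloads emit the unaligned 128-bit move: the mandatory F3 prefix, an optional REX byte, the 0F escape, then opcode 6F for the register-from-memory form or 7F for the memory-from-register form, followed by the ModRM byte. A hypothetical sketch that hand-assembles the load form for xmm0, [rax] into a byte buffer:

    // Hypothetical sketch: hand-assembling "movdqu xmm0, [rax]" (F3 0F 6F /r).
    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<uint8_t> code;
      code.push_back(0xF3);  // mandatory prefix selecting the MOVDQU form
      code.push_back(0x0F);  // two-byte opcode escape
      code.push_back(0x6F);  // 6F = load (xmm <- m128); 7F is the store form
      // ModRM: mod=00 (indirect, no displacement), reg=000 (xmm0), rm=000 ([rax]).
      code.push_back(0x00);
      for (size_t i = 0; i < code.size(); ++i) printf("%02X ", code[i]);
      printf("\n");  // F3 0F 6F 00
      return 0;
    }
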
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index 49a57e692..2445e2335 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -448,7 +448,7 @@ class Operand BASE_EMBEDDED {
// CpuFeatureScope fscope(assembler, SSE3);
// // Generate SSE3 floating point code.
// } else {
-// // Generate standard x87 or SSE2 floating point code.
+// // Generate standard SSE2 floating point code.
// }
class CpuFeatures : public AllStatic {
public:
@@ -459,7 +459,6 @@ class CpuFeatures : public AllStatic {
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
- if (f == SSE2 && !FLAG_enable_sse2) return false;
if (f == SSE3 && !FLAG_enable_sse3) return false;
if (f == SSE4_1 && !FLAG_enable_sse4_1) return false;
if (f == CMOV && !FLAG_enable_cmov) return false;
@@ -480,11 +479,11 @@ class CpuFeatures : public AllStatic {
}
private:
- // Safe defaults include SSE2 and CMOV for X64. It is always available, if
+ // Safe defaults include CMOV for X64. It is always available, if
// anyone checks, but they shouldn't need to check.
// The required user mode extensions in X64 are (from AMD64 ABI Table A.1):
// fpu, tsc, cx8, cmov, mmx, sse, sse2, fxsr, syscall
- static const uint64_t kDefaultCpuFeatures = (1 << SSE2 | 1 << CMOV);
+ static const uint64_t kDefaultCpuFeatures = (1 << CMOV);
#ifdef DEBUG
static bool initialized_;
@@ -1310,6 +1309,9 @@ class Assembler : public AssemblerBase {
void movdqa(const Operand& dst, XMMRegister src);
void movdqa(XMMRegister dst, const Operand& src);
+ void movdqu(const Operand& dst, XMMRegister src);
+ void movdqu(XMMRegister dst, const Operand& src);
+
void movapd(XMMRegister dst, XMMRegister src);
void movaps(XMMRegister dst, XMMRegister src);
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index 2da3de2b9..ba7647bf3 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -1837,21 +1837,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
- // Get the loop depth of the stack guard check. This is recorded in
- // a test(rax, depth) instruction right after the call.
- Label stack_check;
- __ movq(rbx, Operand(rsp, 0)); // return address
- __ movzxbq(rbx, Operand(rbx, 1)); // depth
-
- // Get the loop nesting level at which we allow OSR from the
- // unoptimized code and check if we want to do OSR yet. If not we
- // should perform a stack guard check so we can get interrupts while
- // waiting for on-stack replacement.
__ movq(rax, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ movq(rcx, FieldOperand(rax, JSFunction::kSharedFunctionInfoOffset));
- __ movq(rcx, FieldOperand(rcx, SharedFunctionInfo::kCodeOffset));
- __ cmpb(rbx, FieldOperand(rcx, Code::kAllowOSRAtLoopNestingLevelOffset));
- __ j(greater, &stack_check);
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
@@ -1868,21 +1854,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
- // If we decide not to perform on-stack replacement we perform a
- // stack guard check to enable interrupts.
- __ bind(&stack_check);
- Label ok;
- __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
- __ j(above_equal, &ok, Label::kNear);
-
- StackCheckStub stub;
- __ TailCallStub(&stub);
- if (FLAG_debug_code) {
- __ Abort("Unreachable code: returned from tail call.");
- }
- __ bind(&ok);
- __ ret(0);
-
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiToInteger32(rax, rax);
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index fffd37f51..787c7fb51 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_X64)
#include "bootstrapper.h"
+#include "builtins-decls.h"
#include "code-stubs.h"
#include "regexp-macro-assembler.h"
#include "stub-cache.h"
@@ -39,6 +40,18 @@ namespace v8 {
namespace internal {
+void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
+ Isolate* isolate,
+ CodeStubInterfaceDescriptor* descriptor) {
+ static Register registers[] = { rax, rbx, rcx };
+ descriptor->register_param_count_ = 3;
+ descriptor->register_params_ = registers;
+ descriptor->stack_parameter_count_ = NULL;
+ descriptor->deoptimization_handler_ =
+ Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
+}
+
+
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -158,9 +171,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ movq(rdx, Operand(rsp, 1 * kPointerSize));
- int map_index = (language_mode_ == CLASSIC_MODE)
- ? Context::FUNCTION_MAP_INDEX
- : Context::STRICT_MODE_FUNCTION_MAP_INDEX;
+ int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -386,165 +397,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
-static void GenerateFastCloneShallowArrayCommon(
- MacroAssembler* masm,
- int length,
- FastCloneShallowArrayStub::Mode mode,
- AllocationSiteMode allocation_site_mode,
- Label* fail) {
- // Registers on entry:
- //
- // rcx: boilerplate literal array.
- ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
-
- // All sizes here are multiples of kPointerSize.
- int elements_size = 0;
- if (length > 0) {
- elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
- ? FixedDoubleArray::SizeFor(length)
- : FixedArray::SizeFor(length);
- }
- int size = JSArray::kSize;
- int allocation_info_start = size;
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- size += AllocationSiteInfo::kSize;
- }
- size += elements_size;
-
- // Allocate both the JS array and the elements array in one big
- // allocation. This avoids multiple limit checks.
- AllocationFlags flags = TAG_OBJECT;
- if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
- flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
- }
- __ Allocate(size, rax, rbx, rdx, fail, flags);
-
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(rax, allocation_info_start), kScratchRegister);
- __ movq(FieldOperand(rax, allocation_info_start + kPointerSize), rcx);
- }
-
- // Copy the JS array part.
- for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
- if ((i != JSArray::kElementsOffset) || (length == 0)) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rax, i), rbx);
- }
- }
-
- if (length > 0) {
- // Get hold of the elements array of the boilerplate and setup the
- // elements pointer in the resulting object.
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
- __ lea(rdx, Operand(rax, JSArray::kSize + AllocationSiteInfo::kSize));
- } else {
- __ lea(rdx, Operand(rax, JSArray::kSize));
- }
- __ movq(FieldOperand(rax, JSArray::kElementsOffset), rdx);
-
- // Copy the elements array.
- if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
- for (int i = 0; i < elements_size; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- } else {
- ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
- int i;
- for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
- __ movq(rbx, FieldOperand(rcx, i));
- __ movq(FieldOperand(rdx, i), rbx);
- }
- while (i < elements_size) {
- __ movsd(xmm0, FieldOperand(rcx, i));
- __ movsd(FieldOperand(rdx, i), xmm0);
- i += kDoubleSize;
- }
- ASSERT(i == elements_size);
- }
- }
-}
-
-void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
- // Stack layout on entry:
- //
- // [rsp + kPointerSize]: constant elements.
- // [rsp + (2 * kPointerSize)]: literal index.
- // [rsp + (3 * kPointerSize)]: literals array.
-
- // Load boilerplate object into rcx and check if we need to create a
- // boilerplate.
- __ movq(rcx, Operand(rsp, 3 * kPointerSize));
- __ movq(rax, Operand(rsp, 2 * kPointerSize));
- SmiIndex index = masm->SmiToIndex(rax, rax, kPointerSizeLog2);
- __ movq(rcx,
- FieldOperand(rcx, index.reg, index.scale, FixedArray::kHeaderSize));
- __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
- Label slow_case;
- __ j(equal, &slow_case);
-
- FastCloneShallowArrayStub::Mode mode = mode_;
- // rcx is boilerplate object.
- Factory* factory = masm->isolate()->factory();
- if (mode == CLONE_ANY_ELEMENTS) {
- Label double_elements, check_fast_elements;
- __ movq(rbx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_cow_array_map());
- __ j(not_equal, &check_fast_elements);
- GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&check_fast_elements);
- __ Cmp(FieldOperand(rbx, HeapObject::kMapOffset),
- factory->fixed_array_map());
- __ j(not_equal, &double_elements);
- GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&double_elements);
- mode = CLONE_DOUBLE_ELEMENTS;
- // Fall through to generate the code to handle double elements.
- }
-
- if (FLAG_debug_code) {
- const char* message;
- Heap::RootListIndex expected_map_index;
- if (mode == CLONE_ELEMENTS) {
- message = "Expected (writable) fixed array";
- expected_map_index = Heap::kFixedArrayMapRootIndex;
- } else if (mode == CLONE_DOUBLE_ELEMENTS) {
- message = "Expected (writable) fixed double array";
- expected_map_index = Heap::kFixedDoubleArrayMapRootIndex;
- } else {
- ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
- message = "Expected copy-on-write fixed array";
- expected_map_index = Heap::kFixedCOWArrayMapRootIndex;
- }
- __ push(rcx);
- __ movq(rcx, FieldOperand(rcx, JSArray::kElementsOffset));
- __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
- expected_map_index);
- __ Assert(equal, message);
- __ pop(rcx);
- }
-
- GenerateFastCloneShallowArrayCommon(masm, length_, mode,
- allocation_site_mode_,
- &slow_case);
- __ ret(3 * kPointerSize);
-
- __ bind(&slow_case);
- __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
-}
-
-
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -4154,6 +4006,7 @@ bool CEntryStub::IsPregenerated() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+ StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
@@ -4299,12 +4152,19 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
- // Retrieve the pending exception and clear the variable.
+ // Retrieve the pending exception.
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, masm->isolate());
Operand pending_exception_operand =
masm->ExternalOperand(pending_exception_address);
__ movq(rax, pending_exception_operand);
+
+ // See if we just retrieved an OOM exception.
+ JumpIfOOM(masm, rax, kScratchRegister, throw_out_of_memory_exception);
+
+ // Clear the pending exception.
+ pending_exception_operand =
+ masm->ExternalOperand(pending_exception_address);
__ LoadRoot(rdx, Heap::kTheHoleValueRootIndex);
__ movq(pending_exception_operand, rdx);
@@ -4412,6 +4272,11 @@ void CEntryStub::Generate(MacroAssembler* masm) {
void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
Label invoke, handler_entry, exit;
Label not_outermost_js, not_outermost_js_2;
+
+#ifdef _WIN64
+ const int kCalleeSaveXMMRegisters = 10;
+ const int kFullXMMRegisterSize = 16;
+#endif
{ // NOLINT. Scope block confuses linter.
MacroAssembler::NoRootArrayScope uninitialized_root_register(masm);
// Set up frame.
@@ -4438,8 +4303,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
__ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI.
#endif
__ push(rbx);
- // TODO(X64): On Win64, if we ever use XMM6-XMM15, the low low 64 bits are
- // callee save as well.
+
+#ifdef _WIN64
+ // On Win64 XMM6-XMM15 are callee-save
+ __ subq(rsp, Immediate(kCalleeSaveXMMRegisters * kFullXMMRegisterSize));
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 0), xmm6);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 1), xmm7);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 2), xmm8);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 3), xmm9);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 4), xmm10);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 5), xmm11);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 6), xmm12);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 7), xmm13);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 8), xmm14);
+ __ movdqu(Operand(rsp, kFullXMMRegisterSize * 9), xmm15);
+#endif
// Set up the roots and smi constant registers.
// Needs to be done before any further smi loads.
@@ -4529,6 +4407,21 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
// Restore callee-saved registers (X64 conventions).
+#ifdef _WIN64
+ // On Win64 XMM6-XMM15 are callee-save
+ __ movdqu(xmm6, Operand(rsp, kFullXMMRegisterSize * 0));
+ __ movdqu(xmm7, Operand(rsp, kFullXMMRegisterSize * 1));
+ __ movdqu(xmm8, Operand(rsp, kFullXMMRegisterSize * 2));
+  __ movdqu(xmm9, Operand(rsp, kFullXMMRegisterSize * 3));
+ __ movdqu(xmm10, Operand(rsp, kFullXMMRegisterSize * 4));
+ __ movdqu(xmm11, Operand(rsp, kFullXMMRegisterSize * 5));
+ __ movdqu(xmm12, Operand(rsp, kFullXMMRegisterSize * 6));
+ __ movdqu(xmm13, Operand(rsp, kFullXMMRegisterSize * 7));
+ __ movdqu(xmm14, Operand(rsp, kFullXMMRegisterSize * 8));
+ __ movdqu(xmm15, Operand(rsp, kFullXMMRegisterSize * 9));
+ __ addq(rsp, Immediate(kCalleeSaveXMMRegisters * kFullXMMRegisterSize));
+#endif
+
__ pop(rbx);
#ifdef _WIN64
// Callee save on in Win64 ABI, arguments/volatile in AMD64 ABI.
@@ -6775,8 +6668,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
- ASSERT(!Serializer::enabled());
- CEntryStub ces(1, kSaveFPRegs);
+ CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
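
The JSEntryStub change is needed because the Windows x64 calling convention treats XMM6 through XMM15 as callee-saved, while the System V AMD64 ABI used elsewhere does not; the stub therefore reserves 10 * 16 = 160 bytes and spills the full 128-bit registers with movdqu. A small sketch of the layout arithmetic the save/restore sequence relies on (constants mirror the ones introduced above):

    // Hypothetical sketch of the Win64 XMM spill-area layout used above.
    #include <cstdio>

    int main() {
      const int kCalleeSaveXMMRegisters = 10;  // xmm6 .. xmm15
      const int kFullXMMRegisterSize = 16;     // 128 bits, the movdqu unit
      const int kSpillAreaSize = kCalleeSaveXMMRegisters * kFullXMMRegisterSize;

      printf("spill area: %d bytes\n", kSpillAreaSize);  // 160
      for (int i = 0; i < kCalleeSaveXMMRegisters; ++i) {
        // xmm(6 + i) is saved at [rsp + i * 16] right after the subq.
        printf("xmm%-2d -> [rsp + %3d]\n", 6 + i, i * kFullXMMRegisterSize);
      }
      return 0;
    }
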
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index 0bb02dbe7..bae97cd81 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -117,40 +117,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
static const byte kJnsInstruction = 0x79;
-static const byte kJnsOffset = 0x1f;
+static const byte kJnsOffset = 0x1d;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
-void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
+// The back edge bookkeeping code matches the pattern:
+//
+// add <profiling_counter>, <-delta>
+// jns ok
+// call <stack guard>
+// ok:
+//
+// We will patch away the branch so the code is:
+//
+// add <profiling_counter>, <-delta> ;; Not changed
+// nop
+// nop
+// call <on-stack replacement>
+// ok:
+
+void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(!InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ // Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
- ASSERT_EQ(check_code->entry(),
- Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
- //
- // add <profiling_counter>, <-delta>
- // jns ok
- // call <stack guard>
- // test rax, <loop nesting depth>
- // ok: ...
- //
- // We will patch away the branch so the code is:
- //
- // add <profiling_counter>, <-delta> ;; Not changed
- // nop
- // nop
- // call <on-stack replacment>
- // test rax, <loop nesting depth>
- // ok:
- //
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
+ // Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -159,26 +158,48 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
}
-void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
- Address pc_after,
- Code* check_code,
- Code* replacement_code) {
+void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ ASSERT(InterruptCodeIsPatched(unoptimized_code,
+ pc_after,
+ interrupt_code,
+ replacement_code));
+ // Restore the original jump.
Address call_target_address = pc_after - kIntSize;
- ASSERT(replacement_code->entry() ==
- Assembler::target_address_at(call_target_address));
- // Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
- // restore the conditional branch.
- ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
- ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
+ // Restore the original call address.
Assembler::set_target_address_at(call_target_address,
- check_code->entry());
+ interrupt_code->entry());
+
+ interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+ unoptimized_code, call_target_address, interrupt_code);
+}
+
- check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, call_target_address, check_code);
+#ifdef DEBUG
+bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
+ Address pc_after,
+ Code* interrupt_code,
+ Code* replacement_code) {
+ Address call_target_address = pc_after - kIntSize;
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (*(call_target_address - 3) == kNopByteOne) {
+ ASSERT(replacement_code->entry() ==
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+ return true;
+ } else {
+ ASSERT_EQ(interrupt_code->entry(),
+ Assembler::target_address_at(call_target_address));
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ return false;
+ }
}
+#endif // DEBUG
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
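
The patch/revert pair above toggles a single two-byte site in the unoptimized code: in the unpatched state the bytes just before the call are the jns branch (0x79 0x1d), and patching overwrites them with a two-byte nop (0x66 0x90) while redirecting the call target to the on-stack-replacement builtin. InterruptCodeIsPatched can therefore tell the states apart by looking at the first of those bytes. A hypothetical sketch of the byte-level protocol on a plain buffer:

    // Hypothetical sketch of the jns <-> nop patching protocol used above,
    // operating on a plain byte buffer instead of generated code.
    #include <cassert>
    #include <cstdint>

    const uint8_t kJns = 0x79, kJnsOffset = 0x1d;
    const uint8_t kNop1 = 0x66, kNop2 = 0x90;  // 66 90 is a two-byte nop

    bool IsPatched(const uint8_t* site) { return site[0] == kNop1; }

    void Patch(uint8_t* site) {   // disable the branch
      assert(!IsPatched(site));
      site[0] = kNop1;
      site[1] = kNop2;
    }

    void Revert(uint8_t* site) {  // restore the original jns
      assert(IsPatched(site));
      site[0] = kJns;
      site[1] = kJnsOffset;
    }

    int main() {
      uint8_t code[2] = {kJns, kJnsOffset};
      Patch(code);
      assert(IsPatched(code));
      Revert(code);
      assert(!IsPatched(code));
      return 0;
    }
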
diff --git a/deps/v8/src/x64/frames-x64.h b/deps/v8/src/x64/frames-x64.h
index 53c345906..14cc5b8f2 100644
--- a/deps/v8/src/x64/frames-x64.h
+++ b/deps/v8/src/x64/frames-x64.h
@@ -48,18 +48,6 @@ const int kNumSafepointRegisters = 16;
// ----------------------------------------------------
-class StackHandlerConstants : public AllStatic {
- public:
- static const int kNextOffset = 0 * kPointerSize;
- static const int kCodeOffset = 1 * kPointerSize;
- static const int kStateOffset = 2 * kPointerSize;
- static const int kContextOffset = 3 * kPointerSize;
- static const int kFPOffset = 4 * kPointerSize;
-
- static const int kSize = kFPOffset + kPointerSize;
-};
-
-
class EntryFrameConstants : public AllStatic {
public:
#ifdef _WIN64
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 322479dec..564c3def8 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -160,6 +160,8 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
+ // Generators allocate locals, if any, in context slots.
+ ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count == 1) {
__ PushRoot(Heap::kUndefinedValueRootIndex);
} else if (locals_count > 1) {
@@ -347,13 +349,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
// the deoptimization input data found in the optimized code.
RecordBackEdge(stmt->OsrEntryId());
- // Loop stack checks can be patched to perform on-stack replacement. In
- // order to decide whether or not to perform OSR we embed the loop depth
- // in a test instruction after the call so we can extract it from the OSR
- // builtin.
- ASSERT(loop_depth() > 0);
- __ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
-
EmitProfilingCounterReset();
__ bind(&ok);
@@ -1240,7 +1235,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
- FastNewClosureStub stub(info->language_mode());
+ FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ Push(info);
__ CallStub(&stub);
} else {
@@ -1695,24 +1690,33 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
- __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
- __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
- __ Push(Smi::FromInt(expr->literal_index()));
- __ Push(constant_elements);
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_elements);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
- } else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ } else if (Serializer::enabled() ||
+ length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Push(Smi::FromInt(expr->literal_index()));
+ __ Push(constant_elements);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@@ -1729,6 +1733,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
+ __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+ __ movq(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
+ __ Move(rbx, Smi::FromInt(expr->literal_index()));
+ __ Move(rcx, constant_elements);
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index 817a9d35e..6fc56ed1e 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -118,7 +118,7 @@ void LCodeGen::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
int length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
- memcpy(copy.start(), builder.Finalize(), copy.length());
+ OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
@@ -1793,6 +1793,8 @@ void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
break;
case Token::DIV:
__ divsd(left, right);
+      // Don't delete this mov. It may improve performance on some CPUs
+      // when there is a mulsd depending on the result.
__ movaps(left, left);
break;
case Token::MOD:
@@ -3366,7 +3368,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
-void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
Heap::kHeapNumberMapRootIndex);
@@ -3418,7 +3420,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
-void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input_reg = ToRegister(instr->value());
__ testl(input_reg, input_reg);
Label is_positive;
@@ -3429,19 +3431,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
- DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
- LUnaryMathOperation* instr)
+ DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
- LUnaryMathOperation* instr_;
+ LMathAbs* instr_;
};
ASSERT(instr->value()->Equals(instr->result()));
@@ -3469,7 +3470,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3528,7 +3529,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathRound(LMathRound* instr) {
const XMMRegister xmm_scratch = xmm0;
Register output_reg = ToRegister(instr->result());
XMMRegister input_reg = ToDoubleRegister(instr->value());
@@ -3589,14 +3590,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
__ sqrtsd(input_reg, input_reg);
}
-void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
XMMRegister xmm_scratch = xmm0;
XMMRegister input_reg = ToDoubleRegister(instr->value());
ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
@@ -3763,7 +3764,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
-void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
@@ -3771,7 +3772,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
@@ -3779,7 +3780,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
@@ -3787,7 +3788,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
+void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
@@ -3795,42 +3796,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
- switch (instr->op()) {
- case kMathAbs:
- DoMathAbs(instr);
- break;
- case kMathFloor:
- DoMathFloor(instr);
- break;
- case kMathRound:
- DoMathRound(instr);
- break;
- case kMathSqrt:
- DoMathSqrt(instr);
- break;
- case kMathPowHalf:
- DoMathPowHalf(instr);
- break;
- case kMathCos:
- DoMathCos(instr);
- break;
- case kMathSin:
- DoMathSin(instr);
- break;
- case kMathTan:
- DoMathTan(instr);
- break;
- case kMathLog:
- DoMathLog(instr);
- break;
-
- default:
- UNREACHABLE();
- }
-}
-
-
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(rdi));
ASSERT(instr->HasPointerMap());
@@ -4977,7 +4942,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
ZoneList<Handle<JSObject> >* prototypes = instr->prototypes();
@@ -4989,7 +4953,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
- __ LoadHeapObject(reg, prototypes->at(prototypes->length() - 1));
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(reg, prototypes->at(i));
@@ -5114,11 +5077,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+ }
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
- if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
- flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
- }
__ Allocate(size, result, temp, no_reg, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@@ -5141,7 +5104,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
PushSafepointRegistersScope scope(this);
__ Integer32ToSmi(size, size);
__ push(size);
- CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
+ if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInOldPointerSpace, 1, instr);
+ } else {
+ CallRuntimeFromDeferred(
+ Runtime::kAllocateInNewSpace, 1, instr);
+ }
__ StoreToSafepointRegisterSlot(result, rax);
}
@@ -5169,26 +5138,33 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
DeoptimizeIf(not_equal, instr->environment());
}
- // Set up the parameters to the stub/runtime call.
- __ PushHeapObject(literals);
- __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
- // Boilerplate already exists, constant elements are never accessed.
- // Pass an empty fixed array.
- __ Push(isolate()->factory()->empty_fixed_array());
-
- // Pick the right runtime function or stub to call.
+ // Set up the parameters to the stub/runtime call and pick the right
+ // runtime function or stub to call. Boilerplate already exists,
+ // constant elements are never accessed, pass an empty fixed array.
int length = instr->hydrogen()->length();
if (instr->hydrogen()->IsCopyOnWrite()) {
ASSERT(instr->hydrogen()->depth() == 1);
+ __ LoadHeapObject(rax, literals);
+ __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Move(rcx, isolate()->factory()->empty_fixed_array());
FastCloneShallowArrayStub::Mode mode =
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS;
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
+ __ PushHeapObject(literals);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(isolate()->factory()->empty_fixed_array());
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
+ __ PushHeapObject(literals);
+ __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Push(isolate()->factory()->empty_fixed_array());
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
+ __ LoadHeapObject(rax, literals);
+ __ Move(rbx, Smi::FromInt(instr->hydrogen()->literal_index()));
+ __ Move(rcx, isolate()->factory()->empty_fixed_array());
FastCloneShallowArrayStub::Mode mode =
boilerplate_elements_kind == FAST_DOUBLE_ELEMENTS
? FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
@@ -5199,163 +5175,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
-void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
- Register result,
- Register source,
- int* offset,
- AllocationSiteMode mode) {
- ASSERT(!source.is(rcx));
- ASSERT(!result.is(rcx));
-
- bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
- object->map()->CanTrackAllocationSite();
-
- // Only elements backing stores for non-COW arrays need to be copied.
- Handle<FixedArrayBase> elements(object->elements());
- bool has_elements = elements->length() > 0 &&
- elements->map() != isolate()->heap()->fixed_cow_array_map();
-
- // Increase the offset so that subsequent objects end up right after
- // this object and its backing store.
- int object_offset = *offset;
- int object_size = object->map()->instance_size();
- int elements_size = has_elements ? elements->Size() : 0;
- int elements_offset = *offset + object_size;
- if (create_allocation_site_info) {
- elements_offset += AllocationSiteInfo::kSize;
- *offset += AllocationSiteInfo::kSize;
- }
-
- *offset += object_size + elements_size;
-
- // Copy object header.
- ASSERT(object->properties()->length() == 0);
- int inobject_properties = object->map()->inobject_properties();
- int header_size = object_size - inobject_properties * kPointerSize;
- for (int i = 0; i < header_size; i += kPointerSize) {
- if (has_elements && i == JSObject::kElementsOffset) {
- __ lea(rcx, Operand(result, elements_offset));
- } else {
- __ movq(rcx, FieldOperand(source, i));
- }
- __ movq(FieldOperand(result, object_offset + i), rcx);
- }
-
- // Copy in-object properties.
- for (int i = 0; i < inobject_properties; i++) {
- int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
- Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
- isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
-
- // Build Allocation Site Info if desired
- if (create_allocation_site_info) {
- __ LoadRoot(kScratchRegister, Heap::kAllocationSiteInfoMapRootIndex);
- __ movq(FieldOperand(result, object_size), kScratchRegister);
- __ movq(FieldOperand(result, object_size + kPointerSize), source);
- }
-
- if (has_elements) {
- // Copy elements backing store header.
- __ LoadHeapObject(source, elements);
- for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
- __ movq(rcx, FieldOperand(source, i));
- __ movq(FieldOperand(result, elements_offset + i), rcx);
- }
-
- // Copy elements backing store content.
- int elements_length = elements->length();
- if (elements->IsFixedDoubleArray()) {
- Handle<FixedDoubleArray> double_array =
- Handle<FixedDoubleArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int64_t value = double_array->get_representation(i);
- int total_offset =
- elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- } else if (elements->IsFixedArray()) {
- Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
- for (int i = 0; i < elements_length; i++) {
- int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
- Handle<Object> value(fast_elements->get(i), isolate());
- if (value->IsJSObject()) {
- Handle<JSObject> value_object = Handle<JSObject>::cast(value);
- __ lea(rcx, Operand(result, *offset));
- __ movq(FieldOperand(result, total_offset), rcx);
- __ LoadHeapObject(source, value_object);
- EmitDeepCopy(value_object, result, source, offset,
- DONT_TRACK_ALLOCATION_SITE);
- } else if (value->IsHeapObject()) {
- __ LoadHeapObject(rcx, Handle<HeapObject>::cast(value));
- __ movq(FieldOperand(result, total_offset), rcx);
- } else {
- __ movq(rcx, value, RelocInfo::NONE64);
- __ movq(FieldOperand(result, total_offset), rcx);
- }
- }
- } else {
- UNREACHABLE();
- }
- }
-}
-
-
-void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
- int size = instr->hydrogen()->total_size();
- ElementsKind boilerplate_elements_kind =
- instr->hydrogen()->boilerplate()->GetElementsKind();
-
- // Deopt if the array literal boilerplate ElementsKind is of a type different
- // than the expected one. The check isn't necessary if the boilerplate has
- // already been converted to TERMINAL_FAST_ELEMENTS_KIND.
- if (CanTransitionToMoreGeneralFastElementsKind(
- boilerplate_elements_kind, true)) {
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- __ movq(rcx, FieldOperand(rbx, HeapObject::kMapOffset));
- // Load the map's "bit field 2".
- __ movb(rcx, FieldOperand(rcx, Map::kBitField2Offset));
- // Retrieve elements_kind from bit field 2.
- __ and_(rcx, Immediate(Map::kElementsKindMask));
- __ cmpb(rcx, Immediate(boilerplate_elements_kind <<
- Map::kElementsKindShift));
- DeoptimizeIf(not_equal, instr->environment());
- }
-
- // Allocate all objects that are part of the literal in one big
- // allocation. This avoids multiple limit checks.
- Label allocated, runtime_allocate;
- __ Allocate(size, rax, rcx, rdx, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
-
- __ bind(&runtime_allocate);
- __ Push(Smi::FromInt(size));
- CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
-
- __ bind(&allocated);
- int offset = 0;
- __ LoadHeapObject(rbx, instr->hydrogen()->boilerplate());
- EmitDeepCopy(instr->hydrogen()->boilerplate(), rax, rbx, &offset,
- instr->hydrogen()->allocation_site_mode());
- ASSERT_EQ(size, offset);
-}
-
-
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
@@ -5458,7 +5277,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
- FastNewClosureStub stub(shared_info->language_mode());
+ FastNewClosureStub stub(shared_info->language_mode(),
+ shared_info->is_generator());
__ Push(shared_info);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else {
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index 580929637..e85760683 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -111,7 +111,7 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagD(LNumberTagD* instr);
void DoDeferredNumberTagU(LNumberTagU* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
- void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
+ void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
@@ -258,17 +258,7 @@ class LCodeGen BASE_EMBEDDED {
uint32_t offset,
uint32_t additional_index = 0);
- // Specific math operations - used from DoUnaryMathOperation.
- void EmitIntegerMathAbs(LUnaryMathOperation* instr);
- void DoMathAbs(LUnaryMathOperation* instr);
- void DoMathFloor(LUnaryMathOperation* instr);
- void DoMathRound(LUnaryMathOperation* instr);
- void DoMathSqrt(LUnaryMathOperation* instr);
- void DoMathPowHalf(LUnaryMathOperation* instr);
- void DoMathLog(LUnaryMathOperation* instr);
- void DoMathTan(LUnaryMathOperation* instr);
- void DoMathCos(LUnaryMathOperation* instr);
- void DoMathSin(LUnaryMathOperation* instr);
+ void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 9a3166ee4..ba29ed969 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -304,17 +304,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
-void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
- stream->Add("/%s ", hydrogen()->OpName());
- value()->PrintTo(stream);
-}
-
-
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -1130,41 +1119,97 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
- BuiltinFunctionId op = instr->op();
- if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
- LOperand* input = UseFixedDouble(instr->value(), xmm1);
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
- } else {
- LOperand* input = UseRegisterAtStart(instr->value());
- LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
- switch (op) {
- case kMathAbs:
- return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
- case kMathFloor:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathRound:
- return AssignEnvironment(DefineAsRegister(result));
- case kMathSqrt:
- return DefineSameAsFirst(result);
- case kMathPowHalf:
- return DefineSameAsFirst(result);
- default:
- UNREACHABLE();
- return NULL;
- }
+ switch (instr->op()) {
+ case kMathFloor: return DoMathFloor(instr);
+ case kMathRound: return DoMathRound(instr);
+ case kMathAbs: return DoMathAbs(instr);
+ case kMathLog: return DoMathLog(instr);
+ case kMathSin: return DoMathSin(instr);
+ case kMathCos: return DoMathCos(instr);
+ case kMathTan: return DoMathTan(instr);
+ case kMathExp: return DoMathExp(instr);
+ case kMathSqrt: return DoMathSqrt(instr);
+ case kMathPowHalf: return DoMathPowHalf(instr);
+ default:
+ UNREACHABLE();
+ return NULL;
}
}
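
Note: LUnaryMathOperation is split into one lithium instruction per math builtin, and DoUnaryMathOperation above becomes a thin dispatcher. Each dedicated builder below can then encode that operation's own operand constraints (fixed xmm1 plus MarkAsCall for the transcendental-cache ops, plain register use for floor/round/abs/sqrt). A compilable sketch of the same dispatch shape, using hypothetical names rather than the real LChunkBuilder API:

    #include <string>

    struct LInstr { std::string mnemonic; };              // stand-in for a lithium instruction
    enum MathOp { kFloor, kSqrt, kSin };

    LInstr BuildFloor() { return LInstr{"math-floor"}; }  // register in, register out
    LInstr BuildSqrt()  { return LInstr{"math-sqrt"}; }   // result reuses the first input
    LInstr BuildSin()   { return LInstr{"math-sin"}; }    // fixed double register, treated as a call

    // One switch, one dedicated builder per operation.
    LInstr BuildUnaryMath(MathOp op) {
      switch (op) {
        case kFloor: return BuildFloor();
        case kSqrt:  return BuildSqrt();
        case kSin:   return BuildSin();
      }
      return LInstr{"unreachable"};
    }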
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathFloor* result = new(zone()) LMathFloor(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathRound* result = new(zone()) LMathRound(input);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathAbs* result = new(zone()) LMathAbs(input);
+ return AssignEnvironment(AssignPointerMap(DefineSameAsFirst(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathLog* result = new(zone()) LMathLog(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathSin* result = new(zone()) LMathSin(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathCos* result = new(zone()) LMathCos(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
+ LOperand* input = UseFixedDouble(instr->value(), xmm1);
+ LMathTan* result = new(zone()) LMathTan(input);
+ return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->value()->representation().IsDouble());
+ LOperand* value = UseTempRegister(instr->value());
+ LOperand* temp1 = TempRegister();
+ LOperand* temp2 = TempRegister();
+ LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathSqrt* result = new(zone()) LMathSqrt(input);
+ return DefineSameAsFirst(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+ LOperand* input = UseRegisterAtStart(instr->value());
+ LMathPowHalf* result = new(zone()) LMathPowHalf(input);
+ return DefineSameAsFirst(result);
+}
+
+
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
LOperand* key = UseFixed(instr->key(), rcx);
@@ -1843,7 +1888,7 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
+ return AssignEnvironment(result);
}
@@ -2252,11 +2297,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
-LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
- return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, rax), instr);
-}
-
-
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, rax), instr);
}
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 051f9a469..54f117c0d 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -97,7 +97,6 @@ class LCodeGen;
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
- V(FastLiteral) \
V(FixedArrayBaseLength) \
V(MapEnumLength) \
V(FunctionLiteral) \
@@ -134,9 +133,18 @@ class LCodeGen;
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
+ V(MathAbs) \
+ V(MathCos) \
V(MathExp) \
+ V(MathFloor) \
V(MathFloorOfDiv) \
+ V(MathLog) \
V(MathMinMax) \
+ V(MathPowHalf) \
+ V(MathRound) \
+ V(MathSin) \
+ V(MathSqrt) \
+ V(MathTan) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@@ -178,7 +186,6 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
- V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf) \
V(ForInPrepareMap) \
@@ -650,19 +657,90 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
};
-class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
+class LMathFloor: public LTemplateInstruction<1, 1, 0> {
public:
- explicit LUnaryMathOperation(LOperand* value) {
+ explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
- DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
+ DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
- virtual void PrintDataTo(StringStream* stream);
- BuiltinFunctionId op() const { return hydrogen()->op(); }
+
+class LMathRound: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathRound(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathAbs: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathAbs(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+ DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathLog(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathSin: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSin(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
+};
+
+
+class LMathCos: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathCos(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
+};
+
+
+class LMathTan: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathTan(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
@@ -680,8 +758,30 @@ class LMathExp: public LTemplateInstruction<1, 1, 2> {
LOperand* temp2() { return temps_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
- virtual void PrintDataTo(StringStream* stream);
+
+class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathSqrt(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf: public LTemplateInstruction<1, 1, 0> {
+ public:
+ explicit LMathPowHalf(LOperand* value) {
+ inputs_[0] = value;
+ }
+
+ LOperand* value() { return inputs_[0]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
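
Note: each new instruction is registered in the V(...) list above and declared with DECLARE_CONCRETE_INSTRUCTION(Name, "mnemonic"). A simplified, self-contained sketch of that X-macro technique (the real macros in lithium-x64.h generate opcode, mnemonic and dispatch members, i.e. more than is shown here):

    #include <cstdio>

    #define MATH_INSTRUCTION_LIST(V) \
      V(MathFloor, "math-floor")     \
      V(MathRound, "math-round")     \
      V(MathSqrt,  "math-sqrt")

    // Expand the list once into an enum of opcodes...
    #define DECLARE_OPCODE(Name, mnemonic) k##Name,
    enum Opcode { MATH_INSTRUCTION_LIST(DECLARE_OPCODE) kNumOpcodes };
    #undef DECLARE_OPCODE

    // ...and once into a mnemonic table, so the two stay in sync by construction.
    #define DECLARE_MNEMONIC(Name, mnemonic) mnemonic,
    static const char* const kMnemonics[] = { MATH_INSTRUCTION_LIST(DECLARE_MNEMONIC) };
    #undef DECLARE_MNEMONIC

    int main() {
      std::printf("%s\n", kMnemonics[kMathRound]);  // prints "math-round"
      return 0;
    }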
@@ -1250,7 +1350,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
- DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
+ DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@@ -2158,7 +2258,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
@@ -2267,13 +2367,6 @@ class LAllocate: public LTemplateInstruction<1, 1, 1> {
};
-class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
- public:
- DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
- DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
-};
-
-
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@@ -2505,6 +2598,17 @@ class LChunkBuilder BASE_EMBEDDED {
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
+ LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+ LInstruction* DoMathRound(HUnaryMathOperation* instr);
+ LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+ LInstruction* DoMathLog(HUnaryMathOperation* instr);
+ LInstruction* DoMathSin(HUnaryMathOperation* instr);
+ LInstruction* DoMathCos(HUnaryMathOperation* instr);
+ LInstruction* DoMathTan(HUnaryMathOperation* instr);
+ LInstruction* DoMathExp(HUnaryMathOperation* instr);
+ LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+ LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+
private:
enum Status {
UNUSED,
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 6c1c19df3..58659241a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -899,7 +899,6 @@ void MacroAssembler::PushCallerSaved(SaveFPRegsMode fp_mode,
}
// R12 to r15 are callee save on all platforms.
if (fp_mode == kSaveFPRegs) {
- CpuFeatureScope scope(this, SSE2);
subq(rsp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
@@ -914,7 +913,6 @@ void MacroAssembler::PopCallerSaved(SaveFPRegsMode fp_mode,
Register exclusion2,
Register exclusion3) {
if (fp_mode == kSaveFPRegs) {
- CpuFeatureScope scope(this, SSE2);
for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
XMMRegister reg = XMMRegister::from_code(i);
movsd(reg, Operand(rsp, i * kDoubleSize));
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 7e900dbe6..41e5b8826 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -712,25 +712,41 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
}
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+static void GenerateCheckPropertyCell(MacroAssembler* masm,
+ Handle<GlobalObject> global,
+ Handle<Name> name,
+ Register scratch,
+ Label* miss) {
+ Handle<JSGlobalPropertyCell> cell =
+ GlobalObject::EnsurePropertyCell(global, name);
+ ASSERT(cell->value()->IsTheHole());
+ __ Move(scratch, cell);
+ __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
+ masm->isolate()->factory()->the_hole_value());
+ __ j(not_equal, miss);
+}
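
Note: GenerateCheckPropertyCell moves up so the store-transition path can reuse it. For a global object, "property is absent" is represented by a property cell whose value is the hole, so the compiled stub embeds that cell and jumps to the miss label if anything has filled it in since compilation. A conceptual sketch of the invariant in plain C++ (the cell and hole types are illustrative):

    struct Object {};
    static const Object kTheHoleValue;                 // stands in for the heap's hole sentinel
    struct PropertyCell { const Object* value; };

    // True while the global property is still absent; a stub compiled under this
    // assumption must re-check it at runtime and miss once the cell holds a real value.
    bool PropertyStillAbsent(const PropertyCell& cell) {
      return cell.value == &kTheHoleValue;
    }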
+
+
// Both name_reg and receiver_reg are preserved on jumps to miss_label,
// but may be destroyed if store is successful.
-void StubCompiler::GenerateStoreField(MacroAssembler* masm,
- Handle<JSObject> object,
- LookupResult* lookup,
- Handle<Map> transition,
- Handle<Name> name,
- Register receiver_reg,
- Register name_reg,
- Register value_reg,
- Register scratch1,
- Register scratch2,
- Label* miss_label,
- Label* miss_restore_name) {
+void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Handle<Map> transition,
+ Handle<Name> name,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label,
+ Label* miss_restore_name) {
// Check that the map of the object hasn't changed.
- CompareMapMode mode = transition.is_null() ? ALLOW_ELEMENT_TRANSITION_MAPS
- : REQUIRE_EXACT_MAP;
__ CheckMap(receiver_reg, Handle<Map>(object->map()),
- miss_label, DO_SMI_CHECK, mode);
+ miss_label, DO_SMI_CHECK, REQUIRE_EXACT_MAP);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@@ -738,7 +754,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Check that we are allowed to write this.
- if (!transition.is_null() && object->GetPrototype()->IsJSObject()) {
+ if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
@@ -756,12 +772,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
- if (lookup->holder() == *object &&
- !holder->HasFastProperties() &&
- !holder->IsJSGlobalProxy() &&
- !holder->IsJSGlobalObject()) {
- GenerateDictionaryNegativeLookup(
- masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ if (lookup->holder() == *object) {
+ if (holder->IsJSGlobalObject()) {
+ GenerateCheckPropertyCell(
+ masm,
+ Handle<GlobalObject>(GlobalObject::cast(holder)),
+ name,
+ scratch1,
+ miss_restore_name);
+ } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
+ GenerateDictionaryNegativeLookup(
+ masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
+ }
}
}
@@ -770,7 +792,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
- if (!transition.is_null() && (object->map()->unused_property_fields() == 0)) {
+ if (object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ pop(scratch1); // Return address.
@@ -786,32 +808,29 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
- int index;
- if (!transition.is_null()) {
- // Update the map of the object.
- __ Move(scratch1, transition);
- __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
- // Update the write barrier for the map field and pass the now unused
- // name_reg as scratch register.
- __ RecordWriteField(receiver_reg,
- HeapObject::kMapOffset,
- scratch1,
- name_reg,
- kDontSaveFPRegs,
- OMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- index = transition->instance_descriptors()->GetFieldIndex(
- transition->LastAdded());
- } else {
- index = lookup->GetFieldIndex().field_index();
- }
+ // Update the map of the object.
+ __ Move(scratch1, transition);
+ __ movq(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+ // Update the write barrier for the map field and pass the now unused
+ // name_reg as scratch register.
+ __ RecordWriteField(receiver_reg,
+ HeapObject::kMapOffset,
+ scratch1,
+ name_reg,
+ kDontSaveFPRegs,
+ OMIT_REMEMBERED_SET,
+ OMIT_SMI_CHECK);
+
+ int index = transition->instance_descriptors()->GetFieldIndex(
+ transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
+ // TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@@ -842,21 +861,65 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-static void GenerateCheckPropertyCell(MacroAssembler* masm,
- Handle<GlobalObject> global,
- Handle<Name> name,
- Register scratch,
- Label* miss) {
- Handle<JSGlobalPropertyCell> cell =
- GlobalObject::EnsurePropertyCell(global, name);
- ASSERT(cell->value()->IsTheHole());
- __ Move(scratch, cell);
- __ Cmp(FieldOperand(scratch, JSGlobalPropertyCell::kValueOffset),
- masm->isolate()->factory()->the_hole_value());
- __ j(not_equal, miss);
+// Both name_reg and receiver_reg are preserved on jumps to miss_label,
+// but may be destroyed if store is successful.
+void StubCompiler::GenerateStoreField(MacroAssembler* masm,
+ Handle<JSObject> object,
+ LookupResult* lookup,
+ Register receiver_reg,
+ Register name_reg,
+ Register value_reg,
+ Register scratch1,
+ Register scratch2,
+ Label* miss_label) {
+ // Check that the map of the object hasn't changed.
+ __ CheckMap(receiver_reg, Handle<Map>(object->map()),
+ miss_label, DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
+
+ // Perform global security token check if needed.
+ if (object->IsJSGlobalProxy()) {
+ __ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
+ }
+
+ // Stub never generated for non-global objects that require access
+ // checks.
+ ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
+
+ int index = lookup->GetFieldIndex().field_index();
+
+ // Adjust for the number of properties stored in the object. Even in the
+ // face of a transition we can use the old map here because the size of the
+ // object and the number of in-object properties is not going to change.
+ index -= object->map()->inobject_properties();
+
+ // TODO(verwaest): Share this code as a code stub.
+ if (index < 0) {
+ // Set the property straight into the object.
+ int offset = object->map()->instance_size() + (index * kPointerSize);
+ __ movq(FieldOperand(receiver_reg, offset), value_reg);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ receiver_reg, offset, name_reg, scratch1, kDontSaveFPRegs);
+ } else {
+ // Write to the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ // Get the properties array (optimistically).
+ __ movq(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+ __ movq(FieldOperand(scratch1, offset), value_reg);
+
+ // Update the write barrier for the array address.
+ // Pass the value being stored in the now unused name_reg.
+ __ movq(name_reg, value_reg);
+ __ RecordWriteField(
+ scratch1, offset, name_reg, receiver_reg, kDontSaveFPRegs);
+ }
+
+ // Return the value (register rax).
+ ASSERT(value_reg.is(rax));
+ __ ret(0);
}
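
Note: the old GenerateStoreField handled both plain field stores and map-transitioning stores; it is now split into GenerateStoreTransition (above: exact map check, map update, write barrier) and this leaner GenerateStoreField (relaxed map check, no transition handling). A hypothetical caller-side selection, based only on the signatures shown in this hunk; the real call sites live in shared stub-cache code outside this patch excerpt:

    if (transition.is_null()) {
      // Store into an existing field: no map change needed.
      StubCompiler::GenerateStoreField(masm, object, lookup,
                                       receiver_reg, name_reg, value_reg,
                                       scratch1, scratch2, &miss);
    } else {
      // Store that adds a property: check the exact map, then transition it.
      StubCompiler::GenerateStoreTransition(masm, object, lookup, transition, name,
                                            receiver_reg, name_reg, value_reg,
                                            scratch1, scratch2,
                                            &miss, &miss_restore_name);
    }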
@@ -1077,7 +1140,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
Handle<GlobalObject> global) {
Label miss;
- Register reg = HandlerFrontendHeader(object, receiver(), last, name, &miss);
+ HandlerFrontendHeader(object, receiver(), last, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@@ -1085,13 +1148,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
- if (!last->HasFastProperties()) {
- __ movq(scratch2(), FieldOperand(reg, HeapObject::kMapOffset));
- __ movq(scratch2(), FieldOperand(scratch2(), Map::kPrototypeOffset));
- __ Cmp(scratch2(), isolate()->factory()->null_value());
- __ j(not_equal, &miss);
- }
-
HandlerFrontendFooter(success, &miss);
}
diff --git a/deps/v8/test/cctest/cctest.cc b/deps/v8/test/cctest/cctest.cc
index f1737602e..a0091ff6f 100644
--- a/deps/v8/test/cctest/cctest.cc
+++ b/deps/v8/test/cctest/cctest.cc
@@ -57,6 +57,27 @@ CcTest::CcTest(TestFunction* callback, const char* file, const char* name,
}
+v8::Persistent<v8::Context> CcTest::context_;
+
+
+void CcTest::InitializeVM(CcTestExtensionFlags extensions) {
+ const char* extension_names[kMaxExtensions];
+ int extension_count = 0;
+#define CHECK_EXTENSION_FLAG(Name, Id) \
+ if (extensions.Contains(Name##_ID)) extension_names[extension_count++] = Id;
+ EXTENSION_LIST(CHECK_EXTENSION_FLAG)
+#undef CHECK_EXTENSION_FLAG
+ if (context_.IsEmpty()) {
+ v8::Isolate* isolate = default_isolate();
+ v8::HandleScope scope(isolate);
+ v8::ExtensionConfiguration config(extension_count, extension_names);
+ v8::Local<v8::Context> context = v8::Context::New(isolate, &config);
+ context_ = v8::Persistent<v8::Context>::New(isolate, context);
+ }
+ context_->Enter();
+}
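
Note: CcTest::InitializeVM replaces the per-file static env/InitializeVM helpers that the assembler tests further down delete; it lazily creates a single persistent context (optionally with the requested extensions) and enters it. Typical use, mirroring the updated tests later in this patch (the test name here is hypothetical):

    TEST(ExampleUsingSharedContext) {
      CcTest::InitializeVM();          // creates and enters the shared context on first use
      Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
      HandleScope scope(isolate);
      // ... generate code or evaluate script against CcTest::env() ...
    }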
+
+
static void PrintTestList(CcTest* current) {
if (current == NULL) return;
PrintTestList(current->prev());
@@ -71,6 +92,7 @@ static void PrintTestList(CcTest* current) {
v8::Isolate* CcTest::default_isolate_;
+
int main(int argc, char* argv[]) {
v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
CcTest::set_default_isolate(v8::Isolate::GetCurrent());
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index ee9995b86..f58d95824 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -70,6 +70,7 @@
'test-fixed-dtoa.cc',
'test-flags.cc',
'test-func-name-inference.cc',
+ 'test-global-handles.cc',
'test-global-object.cc',
'test-hashing.cc',
'test-hashmap.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 30d9d7e20..854d89e15 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -51,6 +51,26 @@
static void Test##Name()
#endif
+#define EXTENSION_LIST(V) \
+ V(GC_EXTENSION, "v8/gc") \
+ V(PRINT_EXTENSION, "v8/print") \
+ V(TRACE_EXTENSION, "v8/trace")
+
+#define DEFINE_EXTENSION_ID(Name, Ident) Name##_ID,
+enum CcTestExtensionIds {
+ EXTENSION_LIST(DEFINE_EXTENSION_ID)
+ kMaxExtensions
+};
+#undef DEFINE_EXTENSION_ID
+
+typedef v8::internal::EnumSet<CcTestExtensionIds> CcTestExtensionFlags;
+#define DEFINE_EXTENSION_FLAG(Name, Ident) \
+ static const CcTestExtensionFlags Name(1 << Name##_ID);
+ static const CcTestExtensionFlags NO_EXTENSIONS(0);
+ static const CcTestExtensionFlags ALL_EXTENSIONS((1 << kMaxExtensions) - 1);
+ EXTENSION_LIST(DEFINE_EXTENSION_FLAG)
+#undef DEFINE_EXTENSION_FLAG
+
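
Note: EXTENSION_LIST drives both the CcTestExtensionIds enum and the CcTestExtensionFlags constants, so a test can opt into the built-in extensions when initializing the VM. Sketch of the alternatives (only the very first InitializeVM call in the process creates the context, so flag arguments on later calls have no effect):

    CcTest::InitializeVM();               // default: NO_EXTENSIONS
    CcTest::InitializeVM(GC_EXTENSION);   // also registers the "v8/gc" extension
    CcTest::InitializeVM(ALL_EXTENSIONS); // registers everything in EXTENSION_LIST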
class CcTest {
public:
typedef void (TestFunction)();
@@ -67,6 +87,11 @@ class CcTest {
default_isolate_ = default_isolate;
}
static v8::Isolate* default_isolate() { return default_isolate_; }
+ static v8::Isolate* isolate() { return context_->GetIsolate(); }
+ static v8::Handle<v8::Context> env() { return context_; }
+
+ // Helper function to initialize the VM.
+ static void InitializeVM(CcTestExtensionFlags extensions = NO_EXTENSIONS);
private:
TestFunction* callback_;
@@ -74,9 +99,10 @@ class CcTest {
const char* name_;
const char* dependency_;
bool enabled_;
- static CcTest* last_;
CcTest* prev_;
+ static CcTest* last_;
static v8::Isolate* default_isolate_;
+ static v8::Persistent<v8::Context> context_;
};
// Switches between all the Api tests using the threading support.
diff --git a/deps/v8/test/cctest/cctest.status b/deps/v8/test/cctest/cctest.status
index b457ef24e..616f7124d 100644
--- a/deps/v8/test/cctest/cctest.status
+++ b/deps/v8/test/cctest/cctest.status
@@ -90,3 +90,16 @@ test-platform-tls/FastTLS: SKIP
# This test times out.
test-threads/ThreadJoinSelf: SKIP
+
+##############################################################################
+[ $arch == nacl_ia32 || $arch == nacl_x64 ]
+
+# These tests fail as there is no /tmp directory in Native Client.
+test-log/LogAccessorCallbacks: SKIP
+test-log/LogCallbacks: SKIP
+test-log/ProfLazyMode: SKIP
+
+# Native Client doesn't support sockets.
+test-debug/DebuggerAgent: SKIP
+test-debug/DebuggerAgentProtocolOverflowHeader: SKIP
+test-sockets/Socket: SKIP
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index a9780f033..acc47b9c7 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -48,14 +48,6 @@
static const bool kLogThreading = false;
-static bool IsNaN(double x) {
-#ifdef WIN32
- return _isnan(x);
-#else
- return isnan(x);
-#endif
-}
-
using ::v8::AccessorInfo;
using ::v8::Arguments;
using ::v8::Context;
@@ -2178,6 +2170,88 @@ THREADED_TEST(IdentityHash) {
}
+THREADED_TEST(SymbolProperties) {
+ i::FLAG_harmony_symbols = true;
+
+ LocalContext env;
+ v8::Isolate* isolate = env->GetIsolate();
+ v8::HandleScope scope(isolate);
+
+ v8::Local<v8::Object> obj = v8::Object::New();
+ v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
+ v8::Local<v8::Symbol> sym2 = v8::Symbol::New(isolate, "my-symbol");
+
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ // Check basic symbol functionality.
+ CHECK(sym1->IsSymbol());
+ CHECK(sym2->IsSymbol());
+ CHECK(!obj->IsSymbol());
+
+ CHECK(sym1->Equals(sym1));
+ CHECK(sym2->Equals(sym2));
+ CHECK(!sym1->Equals(sym2));
+ CHECK(!sym2->Equals(sym1));
+ CHECK(sym1->StrictEquals(sym1));
+ CHECK(sym2->StrictEquals(sym2));
+ CHECK(!sym1->StrictEquals(sym2));
+ CHECK(!sym2->StrictEquals(sym1));
+
+ CHECK(sym2->Name()->Equals(v8::String::New("my-symbol")));
+
+ v8::Local<v8::Value> sym_val = sym2;
+ CHECK(sym_val->IsSymbol());
+ CHECK(sym_val->Equals(sym2));
+ CHECK(sym_val->StrictEquals(sym2));
+ CHECK(v8::Symbol::Cast(*sym_val)->Equals(sym2));
+
+ v8::Local<v8::Value> sym_obj = v8::SymbolObject::New(isolate, sym2);
+ CHECK(sym_obj->IsSymbolObject());
+ CHECK(!sym2->IsSymbolObject());
+ CHECK(!obj->IsSymbolObject());
+ CHECK(sym_obj->Equals(sym2));
+ CHECK(!sym_obj->StrictEquals(sym2));
+ CHECK(v8::SymbolObject::Cast(*sym_obj)->Equals(sym_obj));
+ CHECK(v8::SymbolObject::Cast(*sym_obj)->SymbolValue()->Equals(sym2));
+
+ // Make sure delete of a non-existent symbol property works.
+ CHECK(obj->Delete(sym1));
+ CHECK(!obj->Has(sym1));
+
+ CHECK(obj->Set(sym1, v8::Integer::New(1503)));
+ CHECK(obj->Has(sym1));
+ CHECK_EQ(1503, obj->Get(sym1)->Int32Value());
+ CHECK(obj->Set(sym1, v8::Integer::New(2002)));
+ CHECK(obj->Has(sym1));
+ CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
+ CHECK_EQ(v8::None, obj->GetPropertyAttributes(sym1));
+
+ CHECK_EQ(0, obj->GetOwnPropertyNames()->Length());
+ int num_props = obj->GetPropertyNames()->Length();
+ CHECK(obj->Set(v8::String::New("bla"), v8::Integer::New(20)));
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+ CHECK_EQ(num_props + 1, obj->GetPropertyNames()->Length());
+
+ HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+ // Add another property and delete it afterwards to force the object in
+ // slow case.
+ CHECK(obj->Set(sym2, v8::Integer::New(2008)));
+ CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
+ CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
+ CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+
+ CHECK(obj->Has(sym1));
+ CHECK(obj->Has(sym2));
+ CHECK(obj->Delete(sym2));
+ CHECK(obj->Has(sym1));
+ CHECK(!obj->Has(sym2));
+ CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
+ CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+}
+
+
THREADED_TEST(HiddenProperties) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
@@ -3270,7 +3344,7 @@ THREADED_TEST(ConversionException) {
CheckUncle(&try_catch);
double number_value = obj->NumberValue();
- CHECK_NE(0, IsNaN(number_value));
+ CHECK_NE(0, isnan(number_value));
CheckUncle(&try_catch);
int64_t integer_value = obj->IntegerValue();
@@ -12417,7 +12491,7 @@ TEST(PreCompileSerialization) {
// Serialize.
int serialized_data_length = sd->Length();
char* serialized_data = i::NewArray<char>(serialized_data_length);
- memcpy(serialized_data, sd->Data(), serialized_data_length);
+ i::OS::MemCopy(serialized_data, sd->Data(), serialized_data_length);
// Deserialize.
v8::ScriptData* deserialized_sd =
@@ -13410,6 +13484,7 @@ v8::Persistent<Context> calling_context2;
static v8::Handle<Value> GetCallingContextCallback(const v8::Arguments& args) {
ApiTestFuzzer::Fuzz();
CHECK(Context::GetCurrent() == calling_context0);
+ CHECK(args.GetIsolate()->GetCurrentContext() == calling_context0);
CHECK(Context::GetCalling() == calling_context1);
CHECK(Context::GetEntered() == calling_context2);
return v8::Integer::New(42);
@@ -15356,21 +15431,21 @@ TEST(VisitExternalStrings) {
static double DoubleFromBits(uint64_t value) {
double target;
- memcpy(&target, &value, sizeof(target));
+ i::OS::MemCopy(&target, &value, sizeof(target));
return target;
}
static uint64_t DoubleToBits(double value) {
uint64_t target;
- memcpy(&target, &value, sizeof(target));
+ i::OS::MemCopy(&target, &value, sizeof(target));
return target;
}
static double DoubleToDateTime(double input) {
double date_limit = 864e13;
- if (IsNaN(input) || input < -date_limit || input > date_limit) {
+ if (isnan(input) || input < -date_limit || input > date_limit) {
return i::OS::nan_value();
}
return (input < 0) ? -(floor(-input)) : floor(input);
@@ -15431,7 +15506,7 @@ THREADED_TEST(QuietSignalingNaNs) {
// Check that Number::New preserves non-NaNs and quiets SNaNs.
v8::Handle<v8::Value> number = v8::Number::New(test_value);
double stored_number = number->NumberValue();
- if (!IsNaN(test_value)) {
+ if (!isnan(test_value)) {
CHECK_EQ(test_value, stored_number);
} else {
uint64_t stored_bits = DoubleToBits(stored_number);
@@ -15450,7 +15525,7 @@ THREADED_TEST(QuietSignalingNaNs) {
v8::Handle<v8::Value> date = v8::Date::New(test_value);
double expected_stored_date = DoubleToDateTime(test_value);
double stored_date = date->NumberValue();
- if (!IsNaN(expected_stored_date)) {
+ if (!isnan(expected_stored_date)) {
CHECK_EQ(expected_stored_date, stored_date);
} else {
uint64_t stored_bits = DoubleToBits(stored_date);
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index 5cb4ab323..58ce5ec8f 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -43,20 +43,10 @@ typedef Object* (*F3)(void* p0, int p1, int p2, int p3, int p4);
typedef Object* (*F4)(void* p0, void* p1, int p2, int p3, int p4);
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
#define __ assm.
TEST(0) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -83,7 +73,7 @@ TEST(0) {
TEST(1) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -121,7 +111,7 @@ TEST(1) {
TEST(2) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -168,7 +158,7 @@ TEST(2) {
TEST(3) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -224,7 +214,7 @@ TEST(3) {
TEST(4) {
// Test the VFP floating point instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -363,7 +353,7 @@ TEST(4) {
TEST(5) {
// Test the ARMv7 bitfield instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -400,7 +390,7 @@ TEST(5) {
TEST(6) {
// Test saturating instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -444,7 +434,7 @@ static void TestRoundingMode(VCVTTypes types,
double value,
int expected,
bool expected_exception = false) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -622,7 +612,7 @@ TEST(7) {
TEST(8) {
// Test VFP multi load/store with ia_w.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -654,87 +644,83 @@ TEST(8) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(&assm, VFP2);
-
- __ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
- __ sub(fp, ip, Operand(4));
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
- __ add(r4, r0, Operand(OFFSET_OF(D, a)));
- __ vldm(ia_w, r4, d0, d3);
- __ vldm(ia_w, r4, d4, d7);
+ __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia_w, r4, d0, d3);
+ __ vldm(ia_w, r4, d4, d7);
- __ add(r4, r0, Operand(OFFSET_OF(D, a)));
- __ vstm(ia_w, r4, d6, d7);
- __ vstm(ia_w, r4, d0, d5);
+ __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia_w, r4, d6, d7);
+ __ vstm(ia_w, r4, d0, d5);
- __ add(r4, r1, Operand(OFFSET_OF(F, a)));
- __ vldm(ia_w, r4, s0, s3);
- __ vldm(ia_w, r4, s4, s7);
+ __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia_w, r4, s0, s3);
+ __ vldm(ia_w, r4, s4, s7);
- __ add(r4, r1, Operand(OFFSET_OF(F, a)));
- __ vstm(ia_w, r4, s6, s7);
- __ vstm(ia_w, r4, s0, s5);
+ __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia_w, r4, s6, s7);
+ __ vstm(ia_w, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = isolate->heap()->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
- d.a = 1.1;
- d.b = 2.2;
- d.c = 3.3;
- d.d = 4.4;
- d.e = 5.5;
- d.f = 6.6;
- d.g = 7.7;
- d.h = 8.8;
-
- f.a = 1.0;
- f.b = 2.0;
- f.c = 3.0;
- f.d = 4.0;
- f.e = 5.0;
- f.f = 6.0;
- f.g = 7.0;
- f.h = 8.0;
-
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(7.7, d.a);
- CHECK_EQ(8.8, d.b);
- CHECK_EQ(1.1, d.c);
- CHECK_EQ(2.2, d.d);
- CHECK_EQ(3.3, d.e);
- CHECK_EQ(4.4, d.f);
- CHECK_EQ(5.5, d.g);
- CHECK_EQ(6.6, d.h);
-
- CHECK_EQ(7.0, f.a);
- CHECK_EQ(8.0, f.b);
- CHECK_EQ(1.0, f.c);
- CHECK_EQ(2.0, f.d);
- CHECK_EQ(3.0, f.e);
- CHECK_EQ(4.0, f.f);
- CHECK_EQ(5.0, f.g);
- CHECK_EQ(6.0, f.h);
- }
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
}
TEST(9) {
// Test VFP multi load/store with ia.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -766,91 +752,87 @@ TEST(9) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(&assm, VFP2);
-
- __ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
- __ sub(fp, ip, Operand(4));
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
- __ add(r4, r0, Operand(OFFSET_OF(D, a)));
- __ vldm(ia, r4, d0, d3);
- __ add(r4, r4, Operand(4 * 8));
- __ vldm(ia, r4, d4, d7);
+ __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vldm(ia, r4, d0, d3);
+ __ add(r4, r4, Operand(4 * 8));
+ __ vldm(ia, r4, d4, d7);
- __ add(r4, r0, Operand(OFFSET_OF(D, a)));
- __ vstm(ia, r4, d6, d7);
- __ add(r4, r4, Operand(2 * 8));
- __ vstm(ia, r4, d0, d5);
+ __ add(r4, r0, Operand(OFFSET_OF(D, a)));
+ __ vstm(ia, r4, d6, d7);
+ __ add(r4, r4, Operand(2 * 8));
+ __ vstm(ia, r4, d0, d5);
- __ add(r4, r1, Operand(OFFSET_OF(F, a)));
- __ vldm(ia, r4, s0, s3);
- __ add(r4, r4, Operand(4 * 4));
- __ vldm(ia, r4, s4, s7);
+ __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vldm(ia, r4, s0, s3);
+ __ add(r4, r4, Operand(4 * 4));
+ __ vldm(ia, r4, s4, s7);
- __ add(r4, r1, Operand(OFFSET_OF(F, a)));
- __ vstm(ia, r4, s6, s7);
- __ add(r4, r4, Operand(2 * 4));
- __ vstm(ia, r4, s0, s5);
+ __ add(r4, r1, Operand(OFFSET_OF(F, a)));
+ __ vstm(ia, r4, s6, s7);
+ __ add(r4, r4, Operand(2 * 4));
+ __ vstm(ia, r4, s0, s5);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = isolate->heap()->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
- d.a = 1.1;
- d.b = 2.2;
- d.c = 3.3;
- d.d = 4.4;
- d.e = 5.5;
- d.f = 6.6;
- d.g = 7.7;
- d.h = 8.8;
-
- f.a = 1.0;
- f.b = 2.0;
- f.c = 3.0;
- f.d = 4.0;
- f.e = 5.0;
- f.f = 6.0;
- f.g = 7.0;
- f.h = 8.0;
-
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(7.7, d.a);
- CHECK_EQ(8.8, d.b);
- CHECK_EQ(1.1, d.c);
- CHECK_EQ(2.2, d.d);
- CHECK_EQ(3.3, d.e);
- CHECK_EQ(4.4, d.f);
- CHECK_EQ(5.5, d.g);
- CHECK_EQ(6.6, d.h);
-
- CHECK_EQ(7.0, f.a);
- CHECK_EQ(8.0, f.b);
- CHECK_EQ(1.0, f.c);
- CHECK_EQ(2.0, f.d);
- CHECK_EQ(3.0, f.e);
- CHECK_EQ(4.0, f.f);
- CHECK_EQ(5.0, f.g);
- CHECK_EQ(6.0, f.h);
- }
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
}
TEST(10) {
// Test VFP multi load/store with db_w.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -882,87 +864,83 @@ TEST(10) {
// single precision values around in memory.
Assembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(VFP2)) {
- CpuFeatureScope scope(&assm, VFP2);
-
- __ mov(ip, Operand(sp));
- __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
- __ sub(fp, ip, Operand(4));
+ __ mov(ip, Operand(sp));
+ __ stm(db_w, sp, r4.bit() | fp.bit() | lr.bit());
+ __ sub(fp, ip, Operand(4));
- __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
- __ vldm(db_w, r4, d4, d7);
- __ vldm(db_w, r4, d0, d3);
+ __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vldm(db_w, r4, d4, d7);
+ __ vldm(db_w, r4, d0, d3);
- __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
- __ vstm(db_w, r4, d0, d5);
- __ vstm(db_w, r4, d6, d7);
+ __ add(r4, r0, Operand(OFFSET_OF(D, h) + 8));
+ __ vstm(db_w, r4, d0, d5);
+ __ vstm(db_w, r4, d6, d7);
- __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
- __ vldm(db_w, r4, s4, s7);
- __ vldm(db_w, r4, s0, s3);
+ __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vldm(db_w, r4, s4, s7);
+ __ vldm(db_w, r4, s0, s3);
- __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
- __ vstm(db_w, r4, s0, s5);
- __ vstm(db_w, r4, s6, s7);
+ __ add(r4, r1, Operand(OFFSET_OF(F, h) + 4));
+ __ vstm(db_w, r4, s0, s5);
+ __ vstm(db_w, r4, s6, s7);
- __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
+ __ ldm(ia_w, sp, r4.bit() | fp.bit() | pc.bit());
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = isolate->heap()->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
#ifdef DEBUG
- Code::cast(code)->Print();
+ Code::cast(code)->Print();
#endif
- F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
- d.a = 1.1;
- d.b = 2.2;
- d.c = 3.3;
- d.d = 4.4;
- d.e = 5.5;
- d.f = 6.6;
- d.g = 7.7;
- d.h = 8.8;
-
- f.a = 1.0;
- f.b = 2.0;
- f.c = 3.0;
- f.d = 4.0;
- f.e = 5.0;
- f.f = 6.0;
- f.g = 7.0;
- f.h = 8.0;
-
- Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
- USE(dummy);
+ F4 fn = FUNCTION_CAST<F4>(Code::cast(code)->entry());
+ d.a = 1.1;
+ d.b = 2.2;
+ d.c = 3.3;
+ d.d = 4.4;
+ d.e = 5.5;
+ d.f = 6.6;
+ d.g = 7.7;
+ d.h = 8.8;
+
+ f.a = 1.0;
+ f.b = 2.0;
+ f.c = 3.0;
+ f.d = 4.0;
+ f.e = 5.0;
+ f.f = 6.0;
+ f.g = 7.0;
+ f.h = 8.0;
+
+ Object* dummy = CALL_GENERATED_CODE(fn, &d, &f, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(7.7, d.a);
- CHECK_EQ(8.8, d.b);
- CHECK_EQ(1.1, d.c);
- CHECK_EQ(2.2, d.d);
- CHECK_EQ(3.3, d.e);
- CHECK_EQ(4.4, d.f);
- CHECK_EQ(5.5, d.g);
- CHECK_EQ(6.6, d.h);
-
- CHECK_EQ(7.0, f.a);
- CHECK_EQ(8.0, f.b);
- CHECK_EQ(1.0, f.c);
- CHECK_EQ(2.0, f.d);
- CHECK_EQ(3.0, f.e);
- CHECK_EQ(4.0, f.f);
- CHECK_EQ(5.0, f.g);
- CHECK_EQ(6.0, f.h);
- }
+ CHECK_EQ(7.7, d.a);
+ CHECK_EQ(8.8, d.b);
+ CHECK_EQ(1.1, d.c);
+ CHECK_EQ(2.2, d.d);
+ CHECK_EQ(3.3, d.e);
+ CHECK_EQ(4.4, d.f);
+ CHECK_EQ(5.5, d.g);
+ CHECK_EQ(6.6, d.h);
+
+ CHECK_EQ(7.0, f.a);
+ CHECK_EQ(8.0, f.b);
+ CHECK_EQ(1.0, f.c);
+ CHECK_EQ(2.0, f.d);
+ CHECK_EQ(3.0, f.e);
+ CHECK_EQ(4.0, f.f);
+ CHECK_EQ(5.0, f.g);
+ CHECK_EQ(6.0, f.h);
}
TEST(11) {
// Test instructions using the carry flag.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1028,7 +1006,7 @@ TEST(11) {
TEST(12) {
// Test chaining of label usages within instructions (issue 1644).
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1043,7 +1021,7 @@ TEST(12) {
TEST(13) {
// Test VFP instructions using registers d16-d31.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1159,4 +1137,84 @@ TEST(13) {
}
}
+
+TEST(14) {
+ // Test the VFP Canonicalized Nan mode.
+ CcTest::InitializeVM();
+ Isolate* isolate = Isolate::Current();
+ HandleScope scope(isolate);
+
+ typedef struct {
+ double left;
+ double right;
+ double add_result;
+ double sub_result;
+ double mul_result;
+ double div_result;
+ } T;
+ T t;
+
+ // Create a function that makes the four basic operations.
+ Assembler assm(isolate, NULL, 0);
+
+ // Ensure FPSCR state (as JSEntryStub does).
+ Label fpscr_done;
+ __ vmrs(r1);
+ __ tst(r1, Operand(kVFPDefaultNaNModeControlBit));
+ __ b(ne, &fpscr_done);
+ __ orr(r1, r1, Operand(kVFPDefaultNaNModeControlBit));
+ __ vmsr(r1);
+ __ bind(&fpscr_done);
+
+ __ vldr(d0, r0, OFFSET_OF(T, left));
+ __ vldr(d1, r0, OFFSET_OF(T, right));
+ __ vadd(d2, d0, d1);
+ __ vstr(d2, r0, OFFSET_OF(T, add_result));
+ __ vsub(d2, d0, d1);
+ __ vstr(d2, r0, OFFSET_OF(T, sub_result));
+ __ vmul(d2, d0, d1);
+ __ vstr(d2, r0, OFFSET_OF(T, mul_result));
+ __ vdiv(d2, d0, d1);
+ __ vstr(d2, r0, OFFSET_OF(T, div_result));
+
+ __ mov(pc, Operand(lr));
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = isolate->heap()->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+#ifdef DEBUG
+ Code::cast(code)->Print();
+#endif
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.left = BitCast<double>(kHoleNanInt64);
+ t.right = 1;
+ t.add_result = 0;
+ t.sub_result = 0;
+ t.mul_result = 0;
+ t.div_result = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ const uint32_t kArmNanUpper32 = 0x7ff80000;
+ const uint32_t kArmNanLower32 = 0x00000000;
+#ifdef DEBUG
+ const uint64_t kArmNanInt64 =
+ (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
+ ASSERT(kArmNanInt64 != kHoleNanInt64);
+#endif
+ // With VFP2 the sign of the canonicalized Nan is undefined. So
+ // we remove the sign bit for the upper tests.
+ CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
+ CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
+ CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
+}
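
Note: TEST(14) verifies that with the default-NaN FPSCR bit set, VFP arithmetic involving the hole NaN yields the canonical quiet NaN instead of propagating the hole's payload. A host-side sketch of the bit pattern the CHECKs expect, with the sign bit masked off as above:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Canonical quiet NaN: exponent all ones, top mantissa bit set, zero payload.
      const uint64_t kCanonicalNaN = static_cast<uint64_t>(0x7ff80000) << 32;
      double d;
      std::memcpy(&d, &kCanonicalNaN, sizeof(d));   // reinterpret the bits as a double
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      assert((bits & 0x7fffffffffffffffULL) == kCanonicalNaN);  // ignore the sign bit
      return 0;
    }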
+
#undef __
diff --git a/deps/v8/test/cctest/test-assembler-ia32.cc b/deps/v8/test/cctest/test-assembler-ia32.cc
index 61762852a..8d39ee73f 100644
--- a/deps/v8/test/cctest/test-assembler-ia32.cc
+++ b/deps/v8/test/cctest/test-assembler-ia32.cc
@@ -44,21 +44,11 @@ typedef int (*F1)(int x);
typedef int (*F2)(int x, int y);
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
#define __ assm.
TEST(AssemblerIa320) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -86,8 +76,8 @@ TEST(AssemblerIa320) {
TEST(AssemblerIa321) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -125,8 +115,8 @@ TEST(AssemblerIa321) {
TEST(AssemblerIa322) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -170,10 +160,10 @@ TEST(AssemblerIa322) {
typedef int (*F3)(float x);
TEST(AssemblerIa323) {
- InitializeVM();
+ CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -206,10 +196,10 @@ TEST(AssemblerIa323) {
typedef int (*F4)(double x);
TEST(AssemblerIa324) {
- InitializeVM();
+ CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -240,8 +230,8 @@ TEST(AssemblerIa324) {
static int baz = 42;
TEST(AssemblerIa325) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
@@ -265,10 +255,10 @@ TEST(AssemblerIa325) {
typedef double (*F5)(double x, double y);
TEST(AssemblerIa326) {
- InitializeVM();
+ CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -311,10 +301,10 @@ TEST(AssemblerIa326) {
typedef double (*F6)(int x);
TEST(AssemblerIa328) {
- InitializeVM();
+ CcTest::InitializeVM();
if (!CpuFeatures::IsSupported(SSE2)) return;
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
Assembler assm(isolate, buffer, sizeof buffer);
@@ -348,8 +338,8 @@ TEST(AssemblerIa328) {
typedef int (*F7)(double x, double y);
TEST(AssemblerIa329) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[256];
MacroAssembler assm(isolate, buffer, sizeof buffer);
@@ -404,8 +394,8 @@ TEST(AssemblerIa329) {
TEST(AssemblerIa3210) {
// Test chaining of label usages within instructions (issue 1644).
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
@@ -418,8 +408,8 @@ TEST(AssemblerIa3210) {
TEST(AssemblerMultiByteNop) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[1024];
Assembler assm(isolate, buffer, sizeof(buffer));
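
The ia32 changes above follow the same pattern as the ARM file: every per-file static InitializeVM() helper is removed in favour of one shared harness entry point. Below is a minimal sketch of that lazy, idempotent fixture idea, assuming nothing about the real harness; SharedFixture and FakeContext are hypothetical names, not the actual cctest classes.

#include <cassert>

// Hypothetical stand-in for a v8 context; the real harness would hold a
// persistent v8::Context instead.
struct FakeContext { bool entered = false; };

class SharedFixture {
 public:
  // Creates the context on first use only, replacing the env.IsEmpty()
  // checks the patch deletes from each test file.
  static void InitializeVM() {
    static FakeContext ctx;
    if (context_ == nullptr) context_ = &ctx;
    context_->entered = true;
  }
  static FakeContext* context() { return context_; }

 private:
  static FakeContext* context_;
};

FakeContext* SharedFixture::context_ = nullptr;

int main() {
  SharedFixture::InitializeVM();
  SharedFixture::InitializeVM();  // safe to call from every test
  assert(SharedFixture::context()->entered);
  return 0;
}
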
diff --git a/deps/v8/test/cctest/test-assembler-mips.cc b/deps/v8/test/cctest/test-assembler-mips.cc
index 1e4e9e237..419ef3552 100644
--- a/deps/v8/test/cctest/test-assembler-mips.cc
+++ b/deps/v8/test/cctest/test-assembler-mips.cc
@@ -44,24 +44,11 @@ typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- // Disable compilation of natives.
- FLAG_disable_native_files = true;
-
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
#define __ assm.
TEST(MIPS0) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -87,7 +74,7 @@ TEST(MIPS0) {
TEST(MIPS1) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -126,7 +113,7 @@ TEST(MIPS1) {
TEST(MIPS2) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -268,7 +255,7 @@ TEST(MIPS2) {
TEST(MIPS3) {
// Test floating point instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -290,33 +277,30 @@ TEST(MIPS3) {
MacroAssembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ add_d(f8, f4, f6);
- __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ add_d(f8, f4, f6);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) ); // c = a + b.
- __ mov_d(f10, f8); // c
- __ neg_d(f12, f6); // -b
- __ sub_d(f10, f10, f12);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
+ __ mov_d(f10, f8); // c
+ __ neg_d(f12, f6); // -b
+ __ sub_d(f10, f10, f12);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) ); // d = c - (-b).
- __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) ); // b = a.
- __ li(t0, 120);
- __ mtc1(t0, f14);
- __ cvt_d_w(f14, f14); // f14 = 120.0.
- __ mul_d(f10, f10, f14);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
+ __ li(t0, 120);
+ __ mtc1(t0, f14);
+ __ cvt_d_w(f14, f14); // f14 = 120.0.
+ __ mul_d(f10, f10, f14);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) ); // e = d * 120 = 1.8066e16.
- __ div_d(f12, f10, f4);
- __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
+ __ div_d(f12, f10, f4);
+ __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) ); // f = e / a = 120.44.
- __ sqrt_d(f14, f12);
- __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
- // g = sqrt(f) = 10.97451593465515908537
+ __ sqrt_d(f14, f12);
+ __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
+ // g = sqrt(f) = 10.97451593465515908537
if (kArchVariant == kMips32r2) {
__ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
@@ -325,42 +309,41 @@ TEST(MIPS3) {
__ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
}
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e14;
- t.b = 2.75e11;
- t.c = 0.0;
- t.d = 0.0;
- t.e = 0.0;
- t.f = 0.0;
- t.h = 1.5;
- t.i = 2.75;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(1.5e14, t.a);
- CHECK_EQ(1.5e14, t.b);
- CHECK_EQ(1.50275e14, t.c);
- CHECK_EQ(1.50550e14, t.d);
- CHECK_EQ(1.8066e16, t.e);
- CHECK_EQ(120.44, t.f);
- CHECK_EQ(10.97451593465515908537, t.g);
- CHECK_EQ(6.875, t.h);
- }
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 0.0;
+ t.d = 0.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.h = 1.5;
+ t.i = 2.75;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(1.5e14, t.b);
+ CHECK_EQ(1.50275e14, t.c);
+ CHECK_EQ(1.50550e14, t.d);
+ CHECK_EQ(1.8066e16, t.e);
+ CHECK_EQ(120.44, t.f);
+ CHECK_EQ(10.97451593465515908537, t.g);
+ CHECK_EQ(6.875, t.h);
}
TEST(MIPS4) {
// Test moves between floating point and integer registers.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -374,54 +357,50 @@ TEST(MIPS4) {
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ // Swap f4 and f6, by using four integer registers, t0-t3.
+ __ mfc1(t0, f4);
+ __ mfc1(t1, f5);
+ __ mfc1(t2, f6);
+ __ mfc1(t3, f7);
- // Swap f4 and f6, by using four integer registers, t0-t3.
- __ mfc1(t0, f4);
- __ mfc1(t1, f5);
- __ mfc1(t2, f6);
- __ mfc1(t3, f7);
+ __ mtc1(t0, f6);
+ __ mtc1(t1, f7);
+ __ mtc1(t2, f4);
+ __ mtc1(t3, f5);
- __ mtc1(t0, f6);
- __ mtc1(t1, f7);
- __ mtc1(t2, f4);
- __ mtc1(t3, f5);
+ // Store the swapped f4 and f5 back to memory.
+ __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
- // Store the swapped f4 and f5 back to memory.
- __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
-
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e22;
- t.b = 2.75e11;
- t.c = 17.17;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e22;
+ t.b = 2.75e11;
+ t.c = 17.17;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(2.75e11, t.a);
- CHECK_EQ(2.75e11, t.b);
- CHECK_EQ(1.5e22, t.c);
- }
+ CHECK_EQ(2.75e11, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1.5e22, t.c);
}
TEST(MIPS5) {
// Test conversions between doubles and integers.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -436,64 +415,60 @@ TEST(MIPS5) {
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ // Load all structure elements to registers.
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ lw(t0, MemOperand(a0, OFFSET_OF(T, i)) );
+ __ lw(t1, MemOperand(a0, OFFSET_OF(T, j)) );
- // Load all structure elements to registers.
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ lw(t0, MemOperand(a0, OFFSET_OF(T, i)) );
- __ lw(t1, MemOperand(a0, OFFSET_OF(T, j)) );
-
- // Convert double in f4 to int in element i.
- __ cvt_w_d(f8, f4);
- __ mfc1(t2, f8);
- __ sw(t2, MemOperand(a0, OFFSET_OF(T, i)) );
-
- // Convert double in f6 to int in element j.
- __ cvt_w_d(f10, f6);
- __ mfc1(t3, f10);
- __ sw(t3, MemOperand(a0, OFFSET_OF(T, j)) );
-
- // Convert int in original i (t0) to double in a.
- __ mtc1(t0, f12);
- __ cvt_d_w(f0, f12);
- __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
-
- // Convert int in original j (t1) to double in b.
- __ mtc1(t1, f14);
- __ cvt_d_w(f2, f14);
- __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+ // Convert double in f4 to int in element i.
+ __ cvt_w_d(f8, f4);
+ __ mfc1(t2, f8);
+ __ sw(t2, MemOperand(a0, OFFSET_OF(T, i)) );
- __ jr(ra);
- __ nop();
+ // Convert double in f6 to int in element j.
+ __ cvt_w_d(f10, f6);
+ __ mfc1(t3, f10);
+ __ sw(t3, MemOperand(a0, OFFSET_OF(T, j)) );
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e4;
- t.b = 2.75e8;
- t.i = 12345678;
- t.j = -100000;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ // Convert int in original i (t0) to double in a.
+ __ mtc1(t0, f12);
+ __ cvt_d_w(f0, f12);
+ __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
- CHECK_EQ(12345678.0, t.a);
- CHECK_EQ(-100000.0, t.b);
- CHECK_EQ(15000, t.i);
- CHECK_EQ(275000000, t.j);
- }
+ // Convert int in original j (t1) to double in b.
+ __ mtc1(t1, f14);
+ __ cvt_d_w(f2, f14);
+ __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e4;
+ t.b = 2.75e8;
+ t.i = 12345678;
+ t.j = -100000;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+
+ CHECK_EQ(12345678.0, t.a);
+ CHECK_EQ(-100000.0, t.b);
+ CHECK_EQ(15000, t.i);
+ CHECK_EQ(275000000, t.j);
}
TEST(MIPS6) {
// Test simple memory loads and stores.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -566,7 +541,7 @@ TEST(MIPS6) {
TEST(MIPS7) {
// Test floating point compare and branch instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -586,69 +561,65 @@ TEST(MIPS7) {
MacroAssembler assm(isolate, NULL, 0);
Label neither_is_nan, less_than, outa_here;
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
+ __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+ __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+ __ c(UN, D, f4, f6);
+ __ bc1f(&neither_is_nan);
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
- __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
- __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
- __ c(UN, D, f4, f6);
- __ bc1f(&neither_is_nan);
- __ nop();
- __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
- __ Branch(&outa_here);
-
- __ bind(&neither_is_nan);
-
- if (kArchVariant == kLoongson) {
- __ c(OLT, D, f6, f4);
- __ bc1t(&less_than);
- } else {
- __ c(OLT, D, f6, f4, 2);
- __ bc1t(&less_than, 2);
- }
- __ nop();
- __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
- __ Branch(&outa_here);
+ __ bind(&neither_is_nan);
- __ bind(&less_than);
- __ Addu(t0, zero_reg, Operand(1));
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
+ if (kArchVariant == kLoongson) {
+ __ c(OLT, D, f6, f4);
+ __ bc1t(&less_than);
+ } else {
+ __ c(OLT, D, f6, f4, 2);
+ __ bc1t(&less_than, 2);
+ }
+ __ nop();
+ __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+ __ Branch(&outa_here);
+ __ bind(&less_than);
+ __ Addu(t0, zero_reg, Operand(1));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, result)) ); // Set true.
- // This test-case should have additional tests.
- __ bind(&outa_here);
+ // This test-case should have additional tests.
- __ jr(ra);
- __ nop();
+ __ bind(&outa_here);
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.a = 1.5e14;
- t.b = 2.75e11;
- t.c = 2.0;
- t.d = -4.0;
- t.e = 0.0;
- t.f = 0.0;
- t.result = 0;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
- CHECK_EQ(1.5e14, t.a);
- CHECK_EQ(2.75e11, t.b);
- CHECK_EQ(1, t.result);
- }
+ __ jr(ra);
+ __ nop();
+
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ t.a = 1.5e14;
+ t.b = 2.75e11;
+ t.c = 2.0;
+ t.d = -4.0;
+ t.e = 0.0;
+ t.f = 0.0;
+ t.result = 0;
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
+ CHECK_EQ(1.5e14, t.a);
+ CHECK_EQ(2.75e11, t.b);
+ CHECK_EQ(1, t.result);
}
TEST(MIPS8) {
// Test ROTR and ROTRV instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -753,7 +724,7 @@ TEST(MIPS8) {
TEST(MIPS9) {
// Test BRANCH improvements.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -783,7 +754,7 @@ TEST(MIPS9) {
TEST(MIPS10) {
// Test conversions between doubles and long integers.
// Test how the long ints map to FP register pairs.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -802,9 +773,7 @@ TEST(MIPS10) {
Assembler assm(isolate, NULL, 0);
Label L, C;
- if (CpuFeatures::IsSupported(FPU) && kArchVariant == kMips32r2) {
- CpuFeatureScope scope(&assm, FPU);
-
+ if (kArchVariant == kMips32r2) {
// Load all structure elements to registers.
__ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
@@ -858,7 +827,7 @@ TEST(MIPS10) {
TEST(MIPS11) {
// Test LWL, LWR, SWL and SWR instructions.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1002,7 +971,7 @@ TEST(MIPS11) {
TEST(MIPS12) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1094,7 +1063,7 @@ TEST(MIPS12) {
TEST(MIPS13) {
// Test Cvt_d_uw and Trunc_uw_d macros.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1110,54 +1079,50 @@ TEST(MIPS13) {
MacroAssembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
- __ Cvt_d_uw(f10, t0, f22);
- __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
+ __ Cvt_d_uw(f10, t0, f22);
+ __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
- __ Trunc_uw_d(f10, f10, f22);
- __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
+ __ Trunc_uw_d(f10, f10, f22);
+ __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
- __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
- __ Cvt_d_uw(f8, t0, f22);
- __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
+ __ sw(t0, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
+ __ Cvt_d_uw(f8, t0, f22);
+ __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
- __ Trunc_uw_d(f8, f8, f22);
- __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
+ __ Trunc_uw_d(f8, f8, f22);
+ __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.cvt_big_in = 0xFFFFFFFF;
- t.cvt_small_in = 333;
+ t.cvt_big_in = 0xFFFFFFFF;
+ t.cvt_small_in = 333;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
- CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
- CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
+ CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
+ CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
- CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
- CHECK_EQ(static_cast<int>(t.trunc_small_out),
- static_cast<int>(t.cvt_small_in));
- }
+ CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
+ CHECK_EQ(static_cast<int>(t.trunc_small_out),
+ static_cast<int>(t.cvt_small_in));
}
TEST(MIPS14) {
// Test round, floor, ceil, trunc, cvt.
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
@@ -1194,87 +1159,84 @@ TEST(MIPS14) {
MacroAssembler assm(isolate, NULL, 0);
- if (CpuFeatures::IsSupported(FPU)) {
- CpuFeatureScope scope(&assm, FPU);
-
- // Save FCSR.
- __ cfc1(a1, FCSR);
- // Disable FPU exceptions.
- __ ctc1(zero_reg, FCSR);
+ // Save FCSR.
+ __ cfc1(a1, FCSR);
+ // Disable FPU exceptions.
+ __ ctc1(zero_reg, FCSR);
#define RUN_ROUND_TEST(x) \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
- __ x##_w_d(f0, f0); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
- \
- __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
- __ ctc1(zero_reg, FCSR); \
- __ x##_w_d(f0, f0); \
- __ cfc1(a2, FCSR); \
- __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
- __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
-
- RUN_ROUND_TEST(round)
- RUN_ROUND_TEST(floor)
- RUN_ROUND_TEST(ceil)
- RUN_ROUND_TEST(trunc)
- RUN_ROUND_TEST(cvt)
-
- // Restore FCSR.
- __ ctc1(a1, FCSR);
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
+ __ x##_w_d(f0, f0); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
+ \
+ __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
+ __ ctc1(zero_reg, FCSR); \
+ __ x##_w_d(f0, f0); \
+ __ cfc1(a2, FCSR); \
+ __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
+ __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
+
+ RUN_ROUND_TEST(round)
+ RUN_ROUND_TEST(floor)
+ RUN_ROUND_TEST(ceil)
+ RUN_ROUND_TEST(trunc)
+ RUN_ROUND_TEST(cvt)
+
+ // Restore FCSR.
+ __ ctc1(a1, FCSR);
- __ jr(ra);
- __ nop();
+ __ jr(ra);
+ __ nop();
- CodeDesc desc;
- assm.GetCode(&desc);
- Object* code = HEAP->CreateCode(
- desc,
- Code::ComputeFlags(Code::STUB),
- Handle<Code>())->ToObjectChecked();
- CHECK(code->IsCode());
- F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
+ CodeDesc desc;
+ assm.GetCode(&desc);
+ Object* code = HEAP->CreateCode(
+ desc,
+ Code::ComputeFlags(Code::STUB),
+ Handle<Code>())->ToObjectChecked();
+ CHECK(code->IsCode());
+ F3 f = FUNCTION_CAST<F3>(Code::cast(code)->entry());
- t.round_up_in = 123.51;
- t.round_down_in = 123.49;
- t.neg_round_up_in = -123.5;
- t.neg_round_down_in = -123.49;
- t.err1_in = 123.51;
- t.err2_in = 1;
- t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
- t.err4_in = NAN;
+ t.round_up_in = 123.51;
+ t.round_down_in = 123.49;
+ t.neg_round_up_in = -123.5;
+ t.neg_round_down_in = -123.49;
+ t.err1_in = 123.51;
+ t.err2_in = 1;
+ t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
+ t.err4_in = NAN;
- Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
- USE(dummy);
+ Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+ USE(dummy);
#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
#define CHECK_ROUND_RESULT(type) \
@@ -1284,17 +1246,16 @@ TEST(MIPS14) {
CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
CHECK_EQ(kFPUInvalidResult, t.type##_invalid_result);
- CHECK_ROUND_RESULT(round);
- CHECK_ROUND_RESULT(floor);
- CHECK_ROUND_RESULT(ceil);
- CHECK_ROUND_RESULT(cvt);
- }
+ CHECK_ROUND_RESULT(round);
+ CHECK_ROUND_RESULT(floor);
+ CHECK_ROUND_RESULT(ceil);
+ CHECK_ROUND_RESULT(cvt);
}
TEST(MIPS15) {
// Test chaining of label usages within instructions (issue 1644).
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
Assembler assm(isolate, NULL, 0);
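
MIPS13 above exercises the Cvt_d_uw and Trunc_uw_d macros: an unsigned 32-bit value converted to double and truncated back must round-trip exactly, even for 0xFFFFFFFF. The plain-C++ sketch below states the same property without the assembler; it illustrates the expected arithmetic, not the macro implementations.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t inputs[] = { 333u, 0xFFFFFFFFu };  // cvt_small_in, cvt_big_in
  for (uint32_t in : inputs) {
    double as_double = static_cast<double>(in);            // role of Cvt_d_uw
    uint32_t truncated = static_cast<uint32_t>(as_double);  // role of Trunc_uw_d
    assert(truncated == in);                                // mirrors the CHECK_EQs
  }
  return 0;
}
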
diff --git a/deps/v8/test/cctest/test-assembler-x64.cc b/deps/v8/test/cctest/test-assembler-x64.cc
index d2ea77158..669475ad8 100644
--- a/deps/v8/test/cctest/test-assembler-x64.cc
+++ b/deps/v8/test/cctest/test-assembler-x64.cc
@@ -88,16 +88,6 @@ static const v8::internal::Register arg2 = rsi;
#define __ assm.
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
TEST(AssemblerX64ReturnOperation) {
OS::SetUp();
// Allocate an executable page of memory.
@@ -361,8 +351,8 @@ TEST(OperandRegisterDependency) {
TEST(AssemblerX64LabelChaining) {
// Test chaining of label usages within instructions (issue 1644).
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
Assembler assm(Isolate::Current(), NULL, 0);
Label target;
@@ -374,8 +364,8 @@ TEST(AssemblerX64LabelChaining) {
TEST(AssemblerMultiByteNop) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::internal::byte buffer[1024];
Isolate* isolate = Isolate::Current();
Assembler assm(isolate, buffer, sizeof(buffer));
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 6c100b09d..f20043548 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -40,8 +40,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
// --- P r i n t E x t e n s i o n ---
class PrintExtension : public v8::Extension {
@@ -81,16 +79,6 @@ static PrintExtension kPrintExtension;
v8::DeclareExtension kPrintExtensionDeclaration(&kPrintExtension);
-static void InitializeVM() {
- if (env.IsEmpty()) {
- const char* extensions[] = { "v8/print", "v8/gc" };
- v8::ExtensionConfiguration config(2, extensions);
- env = v8::Context::New(&config);
- }
- env->Enter();
-}
-
-
static MaybeObject* GetGlobalProperty(const char* name) {
Handle<String> internalized_name = FACTORY->InternalizeUtf8String(name);
return Isolate::Current()->context()->global_object()->GetProperty(
@@ -142,8 +130,8 @@ static double Inc(int x) {
TEST(Inc) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(4.0, Inc(3));
}
@@ -163,8 +151,8 @@ static double Add(int x, int y) {
TEST(Add) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(5.0, Add(2, 3));
}
@@ -183,8 +171,8 @@ static double Abs(int x) {
TEST(Abs) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(3.0, Abs(-3));
}
@@ -204,15 +192,15 @@ static double Sum(int n) {
TEST(Sum) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(5050.0, Sum(100));
}
TEST(Print) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM(PRINT_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "for (n = 0; n < 100; ++n) print(n, 1, 2);";
Handle<JSFunction> fun = Compile(source);
if (fun.is_null()) return;
@@ -226,8 +214,8 @@ TEST(Print) {
// The following test method stems from my coding efforts today. It
// tests all the functionality I have added to the compiler today
TEST(Stuff) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source =
"r = 0;\n"
"a = new Object;\n"
@@ -258,8 +246,8 @@ TEST(Stuff) {
TEST(UncaughtThrow) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "throw 42;";
Handle<JSFunction> fun = Compile(source);
@@ -280,8 +268,8 @@ TEST(UncaughtThrow) {
// | JS |
// | C-to-JS |
TEST(C2JSFrames) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM(PRINT_EXTENSION | GC_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo(a) { gc(), print(a); }";
@@ -317,8 +305,8 @@ TEST(C2JSFrames) {
// Regression 236. Calling InitLineEnds on a Script with undefined
// source resulted in crash.
TEST(Regression236) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
Handle<Script> script = FACTORY->NewScript(FACTORY->empty_string());
script->set_source(HEAP->undefined_value());
@@ -329,8 +317,8 @@ TEST(Regression236) {
TEST(GetScriptLineNumber) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::ScriptOrigin origin = v8::ScriptOrigin(v8::String::New("test"));
const char function_f[] = "function f() {}";
const int max_rows = 1000;
@@ -342,11 +330,11 @@ TEST(GetScriptLineNumber) {
for (int i = 0; i < max_rows; ++i) {
if (i > 0)
buffer[i - 1] = '\n';
- memcpy(&buffer[i], function_f, sizeof(function_f) - 1);
+ OS::MemCopy(&buffer[i], function_f, sizeof(function_f) - 1);
v8::Handle<v8::String> script_body = v8::String::New(buffer.start());
v8::Script::Compile(script_body, &origin)->Run();
v8::Local<v8::Function> f = v8::Local<v8::Function>::Cast(
- env->Global()->Get(v8::String::New("f")));
+ CcTest::env()->Global()->Get(v8::String::New("f")));
CHECK_EQ(i, f->GetScriptLineNumber());
}
}
@@ -359,8 +347,8 @@ TEST(OptimizedCodeSharing) {
// FastNewClosureStub that is baked into the snapshot is incorrect.
if (!FLAG_cache_optimized_code) return;
FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
for (int i = 0; i < 10; i++) {
LocalContext env;
env->Global()->Set(v8::String::New("x"), v8::Integer::New(i));
@@ -402,7 +390,7 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
Address pc = f->code()->instruction_start();
int decode_size =
Min(f->code()->instruction_size(),
- static_cast<int>(f->code()->stack_check_table_offset()));
+ static_cast<int>(f->code()->back_edge_table_offset()));
Address end = pc + decode_size;
v8::internal::EmbeddedVector<char, 128> decode_buffer;
@@ -423,16 +411,16 @@ static void CheckCodeForUnsafeLiteral(Handle<JSFunction> f) {
TEST(SplitConstantsInFullCompiler) {
- LocalContext env;
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CompileRun("function f() { a = 12345678 }; f();");
- CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
CompileRun("function f(x) { a = 12345678 + x}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
CompileRun("function f(x) { var arguments = 1; x += 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
CompileRun("function f(x) { var arguments = 1; x = 12345678}; f(1);");
- CheckCodeForUnsafeLiteral(GetJSFunction(env->Global(), "f"));
+ CheckCodeForUnsafeLiteral(GetJSFunction(CcTest::env()->Global(), "f"));
}
#endif
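
The GetScriptLineNumber test above builds a script whose i-th copy of function_f is preceded by exactly i newlines and expects GetScriptLineNumber() to report i. A quick standalone sketch of the line arithmetic being asserted follows; LineNumberOf is an illustrative helper, not a v8 API.

#include <cassert>
#include <string>

// Zero-based line number of |position| in |source|: count newlines before it.
static int LineNumberOf(const std::string& source, size_t position) {
  int line = 0;
  for (size_t i = 0; i < position && i < source.size(); ++i)
    if (source[i] == '\n') ++line;
  return line;
}

int main() {
  const std::string source = "\n\nfunction f() {}";
  assert(LineNumberOf(source, source.find("function")) == 2);
  return 0;
}
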
diff --git a/deps/v8/test/cctest/test-cpu-profiler.cc b/deps/v8/test/cctest/test-cpu-profiler.cc
index 0bf80003f..2eece4623 100644
--- a/deps/v8/test/cctest/test-cpu-profiler.cc
+++ b/deps/v8/test/cctest/test-cpu-profiler.cc
@@ -30,6 +30,7 @@
#include "v8.h"
#include "cpu-profiler-inl.h"
#include "cctest.h"
+#include "utils.h"
#include "../include/v8-profiler.h"
using i::CodeEntry;
@@ -39,7 +40,9 @@ using i::CpuProfilesCollection;
using i::ProfileGenerator;
using i::ProfileNode;
using i::ProfilerEventsProcessor;
+using i::ScopedVector;
using i::TokenEnumerator;
+using i::Vector;
TEST(StartStop) {
@@ -51,14 +54,6 @@ TEST(StartStop) {
processor.Join();
}
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) env = v8::Context::New();
- v8::HandleScope scope(env->GetIsolate());
- env->Enter();
-}
-
static inline i::Address ToAddress(int n) {
return reinterpret_cast<i::Address>(n);
}
@@ -69,7 +64,6 @@ static void EnqueueTickSampleEvent(ProfilerEventsProcessor* proc,
i::Address frame3 = NULL) {
i::TickSample* sample = proc->TickSampleEvent();
sample->pc = frame1;
- sample->tos = frame1;
sample->frames_count = 0;
if (frame2 != NULL) {
sample->stack[0] = frame2;
@@ -101,7 +95,7 @@ class TestSetup {
} // namespace
TEST(CodeEvents) {
- InitializeVM();
+ CcTest::InitializeVM();
i::Isolate* isolate = i::Isolate::Current();
i::Heap* heap = isolate->heap();
i::Factory* factory = isolate->factory();
@@ -217,7 +211,7 @@ TEST(TickEvents) {
// http://crbug/51594
// This test must not crash.
TEST(CrashIfStoppingLastNonExistentProfile) {
- InitializeVM();
+ CcTest::InitializeVM();
TestSetup test_setup;
CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
profiler->StartProfiling("1");
@@ -244,7 +238,6 @@ TEST(Issue1398) {
i::TickSample* sample = processor.TickSampleEvent();
sample->pc = ToAddress(0x1200);
- sample->tos = 0;
sample->frames_count = i::TickSample::kMaxFramesCount;
for (int i = 0; i < sample->frames_count; ++i) {
sample->stack[i] = ToAddress(0x1200);
@@ -268,7 +261,7 @@ TEST(Issue1398) {
TEST(DeleteAllCpuProfiles) {
- InitializeVM();
+ CcTest::InitializeVM();
TestSetup test_setup;
CpuProfiler* profiler = i::Isolate::Current()->cpu_profiler();
CHECK_EQ(0, profiler->GetProfilesCount());
@@ -399,3 +392,154 @@ TEST(DeleteCpuProfileDifferentTokens) {
CHECK_EQ(0, cpu_profiler->GetProfileCount());
CHECK_EQ(NULL, cpu_profiler->FindCpuProfile(uid3));
}
+
+
+static bool ContainsString(v8::Handle<v8::String> string,
+ const Vector<v8::Handle<v8::String> >& vector) {
+ for (int i = 0; i < vector.length(); i++) {
+ if (string->Equals(vector[i]))
+ return true;
+ }
+ return false;
+}
+
+
+static void CheckChildrenNames(const v8::CpuProfileNode* node,
+ const Vector<v8::Handle<v8::String> >& names) {
+ int count = node->GetChildrenCount();
+ for (int i = 0; i < count; i++) {
+ v8::Handle<v8::String> name = node->GetChild(i)->GetFunctionName();
+ CHECK(ContainsString(name, names));
+ // Check that there are no duplicates.
+ for (int j = 0; j < count; j++) {
+ if (j == i) continue;
+ CHECK_NE(name, node->GetChild(j)->GetFunctionName());
+ }
+ }
+}
+
+
+static const v8::CpuProfileNode* FindChild(const v8::CpuProfileNode* node,
+ const char* name) {
+ int count = node->GetChildrenCount();
+ v8::Handle<v8::String> nameHandle = v8::String::New(name);
+ for (int i = 0; i < count; i++) {
+ const v8::CpuProfileNode* child = node->GetChild(i);
+ if (nameHandle->Equals(child->GetFunctionName())) return child;
+ }
+ CHECK(false);
+ return NULL;
+}
+
+
+static void CheckSimpleBranch(const v8::CpuProfileNode* node,
+ const char* names[], int length) {
+ for (int i = 0; i < length; i++) {
+ const char* name = names[i];
+ node = FindChild(node, name);
+ CHECK(node);
+ int expectedChildrenCount = (i == length - 1) ? 0 : 1;
+ CHECK_EQ(expectedChildrenCount, node->GetChildrenCount());
+ }
+}
+
+
+static const char* cpu_profiler_test_source = "function loop(timeout) {\n"
+" this.mmm = 0;\n"
+" var start = Date.now();\n"
+" while (Date.now() - start < timeout) {\n"
+" var n = 100*1000;\n"
+" while(n > 1) {\n"
+" n--;\n"
+" this.mmm += n * n * n;\n"
+" }\n"
+" }\n"
+"}\n"
+"function delay() { try { loop(10); } catch(e) { } }\n"
+"function bar() { delay(); }\n"
+"function baz() { delay(); }\n"
+"function foo() {\n"
+" try {\n"
+" delay();\n"
+" bar();\n"
+" delay();\n"
+" baz();\n"
+" } catch (e) { }\n"
+"}\n"
+"function start(timeout) {\n"
+" var start = Date.now();\n"
+" do {\n"
+" foo();\n"
+" var duration = Date.now() - start;\n"
+" } while (duration < timeout);\n"
+" return duration;\n"
+"}\n";
+
+
+// Check that the profile tree for the script above will look like the
+// following:
+//
+// [Top down]:
+// 1062 0 (root) [-1]
+// 1054 0 start [-1]
+// 1054 1 foo [-1]
+// 265 0 baz [-1]
+// 265 1 delay [-1]
+// 264 264 loop [-1]
+// 525 3 delay [-1]
+// 522 522 loop [-1]
+// 263 0 bar [-1]
+// 263 1 delay [-1]
+// 262 262 loop [-1]
+// 2 2 (program) [-1]
+// 6 6 (garbage collector) [-1]
+TEST(CollectCpuProfile) {
+ LocalContext env;
+ v8::HandleScope scope(env->GetIsolate());
+
+ v8::Script::Compile(v8::String::New(cpu_profiler_test_source))->Run();
+ v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+ env->Global()->Get(v8::String::New("start")));
+
+ v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+ v8::Local<v8::String> profile_name = v8::String::New("my_profile");
+
+ cpu_profiler->StartCpuProfiling(profile_name);
+ int32_t profiling_interval_ms = 200;
+#if defined(_WIN32) || defined(_WIN64)
+ // 200ms is not enough on Windows. See
+ // https://code.google.com/p/v8/issues/detail?id=2628
+ profiling_interval_ms = 500;
+#endif
+ v8::Handle<v8::Value> args[] = { v8::Integer::New(profiling_interval_ms) };
+ function->Call(env->Global(), ARRAY_SIZE(args), args);
+ const v8::CpuProfile* profile = cpu_profiler->StopCpuProfiling(profile_name);
+
+ CHECK_NE(NULL, profile);
+ // Dump collected profile to have a better diagnostic in case of failure.
+ reinterpret_cast<i::CpuProfile*>(
+ const_cast<v8::CpuProfile*>(profile))->Print();
+
+ const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+
+ ScopedVector<v8::Handle<v8::String> > names(3);
+ names[0] = v8::String::New(ProfileGenerator::kGarbageCollectorEntryName);
+ names[1] = v8::String::New(ProfileGenerator::kProgramEntryName);
+ names[2] = v8::String::New("start");
+ CheckChildrenNames(root, names);
+
+ const v8::CpuProfileNode* startNode = FindChild(root, "start");
+ CHECK_EQ(1, startNode->GetChildrenCount());
+
+ const v8::CpuProfileNode* fooNode = FindChild(startNode, "foo");
+ CHECK_EQ(3, fooNode->GetChildrenCount());
+
+ const char* barBranch[] = { "bar", "delay", "loop" };
+ CheckSimpleBranch(fooNode, barBranch, ARRAY_SIZE(barBranch));
+ const char* bazBranch[] = { "baz", "delay", "loop" };
+ CheckSimpleBranch(fooNode, bazBranch, ARRAY_SIZE(bazBranch));
+ const char* delayBranch[] = { "delay", "loop" };
+ CheckSimpleBranch(fooNode, delayBranch, ARRAY_SIZE(delayBranch));
+
+ cpu_profiler->DeleteAllCpuProfiles();
+}
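
CheckSimpleBranch() above walks one chain of the expected profile tree (e.g. bar -> delay -> loop) and requires every inner node to have exactly one child and the leaf none. Since building a real v8::CpuProfile needs a live isolate, the self-contained sketch below reproduces the same walk over a plain tree type; Node, FindChild and CheckSimpleBranch here are illustrative re-implementations, not the cctest versions.

#include <cassert>
#include <cstring>
#include <vector>

struct Node {
  const char* name;
  std::vector<Node*> children;
};

static Node* FindChild(Node* node, const char* name) {
  for (Node* child : node->children)
    if (std::strcmp(child->name, name) == 0) return child;
  return nullptr;
}

// Walks a chain like bar -> delay -> loop: every intermediate node must have
// exactly one child, the leaf must have none.
static void CheckSimpleBranch(Node* node, const char* names[], int length) {
  for (int i = 0; i < length; i++) {
    node = FindChild(node, names[i]);
    assert(node != nullptr);
    const size_t expected = (i == length - 1) ? 0 : 1;
    assert(node->children.size() == expected);
  }
}

int main() {
  Node loop{"loop", {}}, delay{"delay", {&loop}}, bar{"bar", {&delay}};
  Node foo{"foo", {&bar}};
  const char* branch[] = { "bar", "delay", "loop" };
  CheckSimpleBranch(&foo, branch, 3);
  return 0;
}
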
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index ae2ec712a..ed8da5c1b 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -695,7 +695,7 @@ TEST(ExistsInHiddenPrototype) {
class SimpleContext {
public:
SimpleContext() {
- context_ = Context::New(0);
+ context_ = Context::New();
context_->Enter();
}
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index c7c770f55..9f12232f7 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -40,15 +40,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
@@ -73,7 +64,7 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
#define SET_UP() \
- InitializeVM(); \
+ CcTest::InitializeVM(); \
Isolate* isolate = Isolate::Current(); \
HandleScope scope(isolate); \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
diff --git a/deps/v8/test/cctest/test-disasm-ia32.cc b/deps/v8/test/cctest/test-disasm-ia32.cc
index 8240a9546..ca81a5a4f 100644
--- a/deps/v8/test/cctest/test-disasm-ia32.cc
+++ b/deps/v8/test/cctest/test-disasm-ia32.cc
@@ -38,14 +38,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
#define __ assm.
@@ -55,8 +47,8 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmIa320) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
HandleScope scope(isolate);
v8::internal::byte buffer[2048];
Assembler assm(isolate, buffer, sizeof buffer);
diff --git a/deps/v8/test/cctest/test-disasm-mips.cc b/deps/v8/test/cctest/test-disasm-mips.cc
index 37aa5eb06..0e79a580f 100644
--- a/deps/v8/test/cctest/test-disasm-mips.cc
+++ b/deps/v8/test/cctest/test-disasm-mips.cc
@@ -40,17 +40,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- // Disable compilation of natives.
- FLAG_disable_native_files = true;
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
-
bool DisassembleAndCompare(byte* pc, const char* compare_string) {
disasm::NameConverter converter;
disasm::Disassembler disasm(converter);
@@ -75,7 +64,7 @@ bool DisassembleAndCompare(byte* pc, const char* compare_string) {
// disassembler. Declare the variables and allocate the data structures used
// in the rest of the macros.
#define SET_UP() \
- InitializeVM(); \
+ CcTest::InitializeVM(); \
Isolate* isolate = Isolate::Current(); \
HandleScope scope(isolate); \
byte *buffer = reinterpret_cast<byte*>(malloc(4*1024)); \
diff --git a/deps/v8/test/cctest/test-disasm-x64.cc b/deps/v8/test/cctest/test-disasm-x64.cc
index fe7aab811..1c7f41639 100644
--- a/deps/v8/test/cctest/test-disasm-x64.cc
+++ b/deps/v8/test/cctest/test-disasm-x64.cc
@@ -38,14 +38,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
-}
-
#define __ assm.
@@ -55,7 +47,7 @@ static void DummyStaticFunction(Object* result) {
TEST(DisasmX64) {
- InitializeVM();
+ CcTest::InitializeVM();
v8::HandleScope scope;
v8::internal::byte buffer[2048];
Assembler assm(Isolate::Current(), buffer, sizeof buffer);
diff --git a/deps/v8/test/cctest/test-func-name-inference.cc b/deps/v8/test/cctest/test-func-name-inference.cc
index 364a4f6a6..5b8293fa4 100644
--- a/deps/v8/test/cctest/test-func-name-inference.cc
+++ b/deps/v8/test/cctest/test-func-name-inference.cc
@@ -47,17 +47,6 @@ using ::v8::internal::SharedFunctionInfo;
using ::v8::internal::String;
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- env = v8::Context::New();
- }
- env->Enter();
-}
-
-
static void CheckFunctionName(v8::Handle<v8::Script> script,
const char* func_pos_src,
const char* ref_inferred_name) {
@@ -108,8 +97,8 @@ static v8::Handle<v8::Script> Compile(const char* src) {
TEST(GlobalProperty) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"fun1 = function() { return 1; }\n"
@@ -120,8 +109,8 @@ TEST(GlobalProperty) {
TEST(GlobalVar) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"var fun1 = function() { return 1; }\n"
@@ -132,8 +121,8 @@ TEST(GlobalVar) {
TEST(LocalVar) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function outer() {\n"
@@ -146,8 +135,8 @@ TEST(LocalVar) {
TEST(InConstructor) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function MyClass() {\n"
@@ -160,8 +149,8 @@ TEST(InConstructor) {
TEST(Factory) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function createMyObj() {\n"
@@ -176,8 +165,8 @@ TEST(Factory) {
TEST(Static) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function MyClass() {}\n"
@@ -194,8 +183,8 @@ TEST(Static) {
TEST(Prototype) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function MyClass() {}\n"
@@ -212,8 +201,8 @@ TEST(Prototype) {
TEST(ObjectLiteral) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function MyClass() {}\n"
@@ -226,8 +215,8 @@ TEST(ObjectLiteral) {
TEST(AsParameter) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function f1(a) { return a(); }\n"
@@ -242,8 +231,8 @@ TEST(AsParameter) {
TEST(MultipleFuncsConditional) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"fun1 = 0 ?\n"
@@ -255,8 +244,8 @@ TEST(MultipleFuncsConditional) {
TEST(MultipleFuncsInLiteral) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function MyClass() {}\n"
@@ -270,8 +259,8 @@ TEST(MultipleFuncsInLiteral) {
// See http://code.google.com/p/v8/issues/detail?id=380
TEST(Issue380) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function a() {\n"
@@ -283,8 +272,8 @@ TEST(Issue380) {
TEST(MultipleAssignments) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"var fun1 = fun2 = function () { return 1; }\n"
@@ -299,8 +288,8 @@ TEST(MultipleAssignments) {
TEST(AsConstructorParameter) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function Foo() {}\n"
@@ -313,8 +302,8 @@ TEST(AsConstructorParameter) {
TEST(FactoryHashmap) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function createMyObj() {\n"
@@ -329,8 +318,8 @@ TEST(FactoryHashmap) {
TEST(FactoryHashmapVariable) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function createMyObj() {\n"
@@ -348,8 +337,8 @@ TEST(FactoryHashmapVariable) {
TEST(FactoryHashmapConditional) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"function createMyObj() {\n"
@@ -363,8 +352,8 @@ TEST(FactoryHashmapConditional) {
TEST(GlobalAssignmentAndCall) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"var Foo = function() {\n"
@@ -381,8 +370,8 @@ TEST(GlobalAssignmentAndCall) {
TEST(AssignmentAndCall) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"(function Enclosing() {\n"
@@ -404,8 +393,8 @@ TEST(AssignmentAndCall) {
TEST(MethodAssignmentInAnonymousFunctionCall) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"(function () {\n"
@@ -420,8 +409,8 @@ TEST(MethodAssignmentInAnonymousFunctionCall) {
TEST(ReturnAnonymousFunction) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::Script> script = Compile(
"(function() {\n"
diff --git a/deps/v8/test/cctest/test-global-handles.cc b/deps/v8/test/cctest/test-global-handles.cc
new file mode 100644
index 000000000..1959a4050
--- /dev/null
+++ b/deps/v8/test/cctest/test-global-handles.cc
@@ -0,0 +1,198 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "global-handles.h"
+
+#include "cctest.h"
+
+using namespace v8::internal;
+
+static int NumberOfWeakCalls = 0;
+static void WeakPointerCallback(v8::Isolate* isolate,
+ v8::Persistent<v8::Value> handle,
+ void* id) {
+ ASSERT(id == reinterpret_cast<void*>(1234));
+ NumberOfWeakCalls++;
+ handle.Dispose(isolate);
+}
+
+static List<Object*> skippable_objects;
+static List<Object*> can_skip_called_objects;
+
+static bool CanSkipCallback(Heap* heap, Object** pointer) {
+ can_skip_called_objects.Add(*pointer);
+ return skippable_objects.Contains(*pointer);
+}
+
+static void ResetCanSkipData() {
+ skippable_objects.Clear();
+ can_skip_called_objects.Clear();
+}
+
+class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
+ public:
+ TestRetainedObjectInfo() : has_been_disposed_(false) {}
+
+ bool has_been_disposed() { return has_been_disposed_; }
+
+ virtual void Dispose() {
+ ASSERT(!has_been_disposed_);
+ has_been_disposed_ = true;
+ }
+
+ virtual bool IsEquivalent(v8::RetainedObjectInfo* other) {
+ return other == this;
+ }
+
+ virtual intptr_t GetHash() { return 0; }
+
+ virtual const char* GetLabel() { return "whatever"; }
+
+ private:
+ bool has_been_disposed_;
+};
+
+class TestObjectVisitor : public ObjectVisitor {
+ public:
+ virtual void VisitPointers(Object** start, Object** end) {
+ for (Object** o = start; o != end; ++o)
+ visited.Add(*o);
+ }
+
+ List<Object*> visited;
+};
+
+TEST(IterateObjectGroupsOldApi) {
+ CcTest::InitializeVM();
+ GlobalHandles* global_handles = Isolate::Current()->global_handles();
+
+ v8::HandleScope handle_scope(CcTest::isolate());
+
+ Handle<Object> g1s1 =
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ Handle<Object> g1s2 =
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->MakeWeak(g1s1.location(),
+ reinterpret_cast<void*>(1234),
+ NULL,
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g1s2.location(),
+ reinterpret_cast<void*>(1234),
+ NULL,
+ &WeakPointerCallback);
+
+ Handle<Object> g2s1 =
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ Handle<Object> g2s2 =
+ global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
+ global_handles->MakeWeak(g2s1.location(),
+ reinterpret_cast<void*>(1234),
+ NULL,
+ &WeakPointerCallback);
+ global_handles->MakeWeak(g2s2.location(),
+ reinterpret_cast<void*>(1234),
+ NULL,
+ &WeakPointerCallback);
+
+ TestRetainedObjectInfo info1;
+ TestRetainedObjectInfo info2;
+ {
+ Object** g1_objects[] = { g1s1.location(), g1s2.location() };
+ Object** g2_objects[] = { g2s1.location(), g2s2.location() };
+
+ global_handles->AddObjectGroup(g1_objects, 2, &info1);
+ global_handles->AddObjectGroup(g2_objects, 2, &info2);
+ }
+
+ // Iterate the object groups. First skip all.
+ {
+ ResetCanSkipData();
+ skippable_objects.Add(*g1s1.location());
+ skippable_objects.Add(*g1s2.location());
+ skippable_objects.Add(*g2s1.location());
+ skippable_objects.Add(*g2s2.location());
+ TestObjectVisitor visitor;
+ global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
+
+ // CanSkipCallback was called for all objects.
+ ASSERT(can_skip_called_objects.length() == 4);
+ ASSERT(can_skip_called_objects.Contains(*g1s1.location()));
+ ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
+ ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
+ ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+
+ // Nothing was visited.
+ ASSERT(visitor.visited.length() == 0);
+ ASSERT(!info1.has_been_disposed());
+ ASSERT(!info2.has_been_disposed());
+ }
+
+ // Iterate again, now only skip the second object group.
+ {
+ ResetCanSkipData();
+ // The first group should still be visited, since only one object is
+ // skipped.
+ skippable_objects.Add(*g1s1.location());
+ skippable_objects.Add(*g2s1.location());
+ skippable_objects.Add(*g2s2.location());
+ TestObjectVisitor visitor;
+ global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
+
+ // CanSkipCallback was called for all objects.
+ ASSERT(can_skip_called_objects.length() == 3 ||
+ can_skip_called_objects.length() == 4);
+ ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
+ ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
+ ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+
+ // The first group was visited.
+ ASSERT(visitor.visited.length() == 2);
+ ASSERT(visitor.visited.Contains(*g1s1.location()));
+ ASSERT(visitor.visited.Contains(*g1s2.location()));
+ ASSERT(info1.has_been_disposed());
+ ASSERT(!info2.has_been_disposed());
+ }
+
+ // Iterate again, don't skip anything.
+ {
+ ResetCanSkipData();
+ TestObjectVisitor visitor;
+ global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
+
+ // CanSkipCallback was called only until a non-skippable object was found.
+ fprintf(stderr, "can skip len %d\n", can_skip_called_objects.length());
+ ASSERT(can_skip_called_objects.length() == 1);
+ ASSERT(can_skip_called_objects.Contains(*g2s1.location()) ||
+ can_skip_called_objects.Contains(*g2s2.location()));
+
+ // The second group was visited.
+ ASSERT(visitor.visited.length() == 2);
+ ASSERT(visitor.visited.Contains(*g2s1.location()));
+ ASSERT(visitor.visited.Contains(*g2s2.location()));
+ ASSERT(info2.has_been_disposed());
+ }
+}
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index a536f30b5..59e7b8f25 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -571,7 +571,7 @@ class TestJSONStream : public v8::OutputStream {
if (abort_countdown_ == 0) return kAbort;
CHECK_GT(chars_written, 0);
i::Vector<char> chunk = buffer_.AddBlock(chars_written, '\0');
- memcpy(chunk.start(), buffer, chars_written);
+ i::OS::MemCopy(chunk.start(), buffer, chars_written);
return kContinue;
}
virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
@@ -1663,41 +1663,6 @@ TEST(NoDebugObjectInSnapshot) {
#endif // ENABLE_DEBUGGER_SUPPORT
-TEST(PersistentHandleCount) {
- LocalContext env;
- v8::Isolate* isolate = env->GetIsolate();
- v8::HandleScope scope(isolate);
-
- // V8 also uses global handles internally, so we can't test for an absolute
- // number.
- int global_handle_count = v8::HeapProfiler::GetPersistentHandleCount();
-
- // Create some persistent handles.
- v8::Persistent<v8::String> p_AAA =
- v8::Persistent<v8::String>::New(isolate, v8_str("AAA"));
- CHECK_EQ(global_handle_count + 1,
- v8::HeapProfiler::GetPersistentHandleCount());
- v8::Persistent<v8::String> p_BBB =
- v8::Persistent<v8::String>::New(isolate, v8_str("BBB"));
- CHECK_EQ(global_handle_count + 2,
- v8::HeapProfiler::GetPersistentHandleCount());
- v8::Persistent<v8::String> p_CCC =
- v8::Persistent<v8::String>::New(isolate, v8_str("CCC"));
- CHECK_EQ(global_handle_count + 3,
- v8::HeapProfiler::GetPersistentHandleCount());
-
- // Dipose the persistent handles in a different order.
- p_AAA.Dispose(env->GetIsolate());
- CHECK_EQ(global_handle_count + 2,
- v8::HeapProfiler::GetPersistentHandleCount());
- p_CCC.Dispose(env->GetIsolate());
- CHECK_EQ(global_handle_count + 1,
- v8::HeapProfiler::GetPersistentHandleCount());
- p_BBB.Dispose(env->GetIsolate());
- CHECK_EQ(global_handle_count, v8::HeapProfiler::GetPersistentHandleCount());
-}
-
-
TEST(AllStrongGcRootsHaveNames) {
LocalContext env;
v8::HandleScope scope(env->GetIsolate());
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index d0bec935d..9aa839be6 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -39,13 +39,6 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) env = v8::Context::New();
- env->Enter();
-}
-
// Go through all incremental marking steps in one swoop.
static void SimulateIncrementalMarking() {
@@ -78,7 +71,7 @@ static void CheckMap(Map* map, int type, int instance_size) {
TEST(HeapMaps) {
- InitializeVM();
+ CcTest::InitializeVM();
CheckMap(HEAP->meta_map(), MAP_TYPE, Map::kSize);
CheckMap(HEAP->heap_number_map(), HEAP_NUMBER_TYPE, HeapNumber::kSize);
CheckMap(HEAP->fixed_array_map(), FIXED_ARRAY_TYPE, kVariableSizeSentinel);
@@ -151,7 +144,7 @@ static void CheckFindCodeObject(Isolate* isolate) {
TEST(HeapObjects) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
@@ -236,7 +229,7 @@ TEST(HeapObjects) {
TEST(Tagging) {
- InitializeVM();
+ CcTest::InitializeVM();
int request = 24;
CHECK_EQ(request, static_cast<int>(OBJECT_POINTER_ALIGN(request)));
CHECK(Smi::FromInt(42)->IsSmi());
@@ -252,7 +245,7 @@ TEST(Tagging) {
TEST(GarbageCollection) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -333,8 +326,8 @@ static void VerifyStringAllocation(Isolate* isolate, const char* string) {
TEST(String) {
- InitializeVM();
- Isolate* isolate = reinterpret_cast<Isolate*>(env->GetIsolate());
+ CcTest::InitializeVM();
+ Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
VerifyStringAllocation(isolate, "a");
VerifyStringAllocation(isolate, "ab");
@@ -345,9 +338,9 @@ TEST(String) {
TEST(LocalHandles) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
const char* name = "Kasper the spunky";
Handle<String> string = FACTORY->NewStringFromAscii(CStrVector(name));
CHECK_EQ(StrLength(name), string->length());
@@ -355,7 +348,7 @@ TEST(LocalHandles) {
TEST(GlobalHandles) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -407,7 +400,7 @@ static void TestWeakGlobalHandleCallback(v8::Isolate* isolate,
TEST(WeakGlobalHandlesScavenge) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -449,7 +442,7 @@ TEST(WeakGlobalHandlesScavenge) {
TEST(WeakGlobalHandlesMark) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -495,7 +488,7 @@ TEST(WeakGlobalHandlesMark) {
TEST(DeleteWeakGlobalHandle) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
Factory* factory = isolate->factory();
@@ -610,7 +603,7 @@ static void CheckInternalizedStrings(const char** strings) {
TEST(StringTable) {
- InitializeVM();
+ CcTest::InitializeVM();
CheckInternalizedStrings(not_so_random_string_table);
CheckInternalizedStrings(not_so_random_string_table);
@@ -618,9 +611,9 @@ TEST(StringTable) {
TEST(FunctionAllocation) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
FACTORY->NewFunction(name, FACTORY->undefined_value());
@@ -641,9 +634,9 @@ TEST(FunctionAllocation) {
TEST(ObjectProperties) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
String* object_string = String::cast(HEAP->Object_string());
Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(object_string)->ToObjectChecked();
@@ -714,9 +707,9 @@ TEST(ObjectProperties) {
TEST(JSObjectMaps) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
Handle<String> name = FACTORY->InternalizeUtf8String("theFunction");
Handle<JSFunction> function =
FACTORY->NewFunction(name, FACTORY->undefined_value());
@@ -738,9 +731,9 @@ TEST(JSObjectMaps) {
TEST(JSArray) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
Handle<String> name = FACTORY->InternalizeUtf8String("Array");
Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(*name)->ToObjectChecked();
@@ -785,9 +778,9 @@ TEST(JSArray) {
TEST(JSObjectCopy) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
String* object_string = String::cast(HEAP->Object_string());
Object* raw_object = Isolate::Current()->context()->global_object()->
GetProperty(object_string)->ToObjectChecked();
@@ -833,11 +826,11 @@ TEST(JSObjectCopy) {
TEST(StringAllocation) {
- InitializeVM();
+ CcTest::InitializeVM();
const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
for (int length = 0; length < 100; length++) {
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
char* non_ascii = NewArray<char>(3 * length + 1);
char* ascii = NewArray<char>(length + 1);
non_ascii[3 * length] = 0;
@@ -885,8 +878,8 @@ static int ObjectsFoundInHeap(Heap* heap, Handle<Object> objs[], int size) {
TEST(Iteration) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
// Array of objects to scan haep for.
const int objs_count = 6;
@@ -923,13 +916,13 @@ TEST(Iteration) {
TEST(EmptyHandleEscapeFrom) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
Handle<JSObject> runaway;
{
- v8::HandleScope nested(env->GetIsolate());
+ v8::HandleScope nested(CcTest::isolate());
Handle<JSObject> empty;
runaway = empty.EscapeFrom(&nested);
}
@@ -945,12 +938,12 @@ static int LenFromSize(int size) {
TEST(Regression39128) {
// Test case for crbug.com/39128.
- InitializeVM();
+ CcTest::InitializeVM();
// Increase the chance of 'bump-the-pointer' allocation in old space.
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
@@ -1022,8 +1015,8 @@ TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
" var x = 42;"
" var y = 42;"
@@ -1033,7 +1026,7 @@ TEST(TestCodeFlushing) {
Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun(source);
}
@@ -1069,8 +1062,8 @@ TEST(TestCodeFlushingIncremental) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
" var x = 42;"
" var y = 42;"
@@ -1080,7 +1073,7 @@ TEST(TestCodeFlushingIncremental) {
Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun(source);
}
@@ -1106,7 +1099,7 @@ TEST(TestCodeFlushingIncremental) {
CHECK(!function->is_compiled() || function->IsOptimized());
// This compile will compile the function again.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun("foo();");
}
@@ -1120,7 +1113,7 @@ TEST(TestCodeFlushingIncremental) {
// Force optimization while incremental marking is active and while
// the function is enqueued as a candidate.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
}
@@ -1135,8 +1128,8 @@ TEST(TestCodeFlushingIncrementalScavenge) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "var foo = function() {"
" var x = 42;"
" var y = 42;"
@@ -1154,7 +1147,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
// This compile will add the code to the compilation cache.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun(source);
}
@@ -1171,7 +1164,7 @@ TEST(TestCodeFlushingIncrementalScavenge) {
CHECK(function2->shared()->is_compiled());
// Clear references to functions so that one of them can die.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun("foo = 0; bar = 0;");
}
@@ -1201,10 +1194,10 @@ TEST(TestCodeFlushingIncrementalAbort) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "function foo() {"
" var x = 42;"
" var y = 42;"
@@ -1214,7 +1207,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
Handle<String> foo_name = FACTORY->InternalizeUtf8String("foo");
// This compile will add the code to the compilation cache.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun(source);
}
@@ -1251,7 +1244,7 @@ TEST(TestCodeFlushingIncrementalAbort) {
#endif // ENABLE_DEBUGGER_SUPPORT
// Force optimization now that code flushing is disabled.
- { v8::HandleScope scope(env->GetIsolate());
+ { v8::HandleScope scope(CcTest::isolate());
CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
}
@@ -1523,7 +1516,7 @@ TEST(TestSizeOfObjects) {
TEST(TestSizeOfObjectsVsHeapIteratorPrecision) {
- InitializeVM();
+ CcTest::InitializeVM();
HEAP->EnsureHeapIsIterable();
intptr_t size_of_objects_1 = HEAP->SizeOfObjects();
HeapIterator iterator(HEAP);
@@ -1574,7 +1567,7 @@ static void FillUpNewSpace(NewSpace* new_space) {
TEST(GrowAndShrinkNewSpace) {
- InitializeVM();
+ CcTest::InitializeVM();
NewSpace* new_space = HEAP->new_space();
if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
@@ -1624,7 +1617,7 @@ TEST(GrowAndShrinkNewSpace) {
TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
- InitializeVM();
+ CcTest::InitializeVM();
if (HEAP->ReservedSemiSpaceSize() == HEAP->InitialSemiSpaceSize() ||
HEAP->MaxSemiSpaceSize() == HEAP->InitialSemiSpaceSize()) {
@@ -1634,7 +1627,7 @@ TEST(CollectingAllAvailableGarbageShrinksNewSpace) {
return;
}
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
NewSpace* new_space = HEAP->new_space();
intptr_t old_capacity, new_capacity;
old_capacity = new_space->Capacity();
@@ -1816,7 +1809,7 @@ TEST(InstanceOfStubWriteBarrier) {
i::FLAG_verify_heap = true;
#endif
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft()) return;
if (i::FLAG_force_marking_deque_overflows) return;
v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
@@ -1867,8 +1860,8 @@ TEST(InstanceOfStubWriteBarrier) {
TEST(PrototypeTransitionClearing) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
CompileRun(
"var base = {};"
@@ -1930,7 +1923,7 @@ TEST(ResetSharedFunctionInfoCountersDuringIncrementalMarking) {
i::FLAG_verify_heap = true;
#endif
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft()) return;
v8::HandleScope outer_scope(v8::Isolate::GetCurrent());
@@ -1986,12 +1979,12 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
i::FLAG_verify_heap = true;
#endif
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft()) return;
- v8::HandleScope outer_scope(env->GetIsolate());
+ v8::HandleScope outer_scope(CcTest::isolate());
{
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
CompileRun(
"function f () {"
" var s = 0;"
@@ -2025,10 +2018,10 @@ TEST(ResetSharedFunctionInfoCountersDuringMarkSweep) {
// Test that HAllocateObject will always return an object in new-space.
TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
SimulateFullSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
@@ -2056,10 +2049,10 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
TEST(OptimizedPretenuringArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_pretenure_literals = true;
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
@@ -2079,13 +2072,37 @@ TEST(OptimizedPretenuringArrayLiterals) {
}
+TEST(OptimizedPretenuringSimpleArrayLiterals) {
+ i::FLAG_allow_natives_syntax = true;
+ i::FLAG_pretenure_literals = false;
+ CcTest::InitializeVM();
+ if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
+ if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
+ v8::HandleScope scope(CcTest::isolate());
+
+ AlwaysAllocateScope always_allocate;
+ v8::Local<v8::Value> res = CompileRun(
+ "function f() {"
+ " return [1, 2, 3];"
+ "};"
+ "f(); f(); f();"
+ "%OptimizeFunctionOnNextCall(f);"
+ "f();");
+
+ Handle<JSObject> o =
+ v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
+
+ CHECK(HEAP->InNewSpace(*o));
+}
+
+
// Test regular array literals allocation.
TEST(OptimizedAllocationArrayLiterals) {
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
+ CcTest::InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
@@ -2117,8 +2134,8 @@ static int CountMapTransitions(Map* map) {
TEST(Regress1465) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_trace_incremental_marking = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
static const int transitions_count = 256;
{
@@ -2155,8 +2172,8 @@ TEST(Regress1465) {
TEST(Regress2143a) {
i::FLAG_collect_maps = true;
i::FLAG_incremental_marking = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
// Prepare a map transition from the root object together with a yet
// untransitioned root object.
@@ -2196,8 +2213,8 @@ TEST(Regress2143b) {
i::FLAG_collect_maps = true;
i::FLAG_incremental_marking = true;
i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
// Prepare a map transition from the root object together with a yet
// untransitioned root object.
@@ -2241,8 +2258,8 @@ TEST(ReleaseOverReservedPages) {
// The optimizer can allocate stuff, messing up the test.
i::FLAG_crankshaft = false;
i::FLAG_always_opt = false;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
static const int number_of_test_pages = 20;
// Prepare many pages with low live-bytes count.
@@ -2280,13 +2297,13 @@ TEST(ReleaseOverReservedPages) {
TEST(Regress2237) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
Handle<String> slice(HEAP->empty_string());
{
// Generate a parent that lives in new-space.
- v8::HandleScope inner_scope(env->GetIsolate());
+ v8::HandleScope inner_scope(CcTest::isolate());
const char* c = "This text is long enough to trigger sliced strings.";
Handle<String> s = FACTORY->NewStringFromAscii(CStrVector(c));
CHECK(s->IsSeqOneByteString());
@@ -2310,8 +2327,8 @@ TEST(Regress2237) {
#ifdef OBJECT_PRINT
TEST(PrintSharedFunctionInfo) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
const char* source = "f = function() { return 987654321; }\n"
"g = function() { return 123456789; }\n";
CompileRun(source);
@@ -2327,8 +2344,8 @@ TEST(PrintSharedFunctionInfo) {
TEST(Regress2211) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::String> value = v8_str("val string");
Smi* hash = Smi::FromInt(321);
@@ -2365,8 +2382,8 @@ TEST(Regress2211) {
TEST(IncrementalMarkingClearsTypeFeedbackCells) {
if (i::FLAG_always_opt) return;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> fun1, fun2;
{
@@ -2424,8 +2441,8 @@ static Code* FindFirstIC(Code* code, Code::Kind kind) {
TEST(IncrementalMarkingPreservesMonomorhpicIC) {
if (i::FLAG_always_opt) return;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
// Prepare function f that contains a monomorphic IC for object
// originating from the same native context.
@@ -2449,8 +2466,8 @@ TEST(IncrementalMarkingPreservesMonomorhpicIC) {
TEST(IncrementalMarkingClearsMonomorhpicIC) {
if (i::FLAG_always_opt) return;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> obj1;
{
@@ -2483,8 +2500,8 @@ TEST(IncrementalMarkingClearsMonomorhpicIC) {
TEST(IncrementalMarkingClearsPolymorhpicIC) {
if (i::FLAG_always_opt) return;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> obj1, obj2;
{
@@ -2549,11 +2566,11 @@ void ReleaseStackTraceDataTest(const char* source) {
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
SourceResource* resource = new SourceResource(i::StrDup(source));
{
- v8::HandleScope scope(env->GetIsolate());
+ v8::HandleScope scope(CcTest::isolate());
v8::Handle<v8::String> source_string = v8::String::NewExternal(resource);
v8::Script::Compile(source_string)->Run();
CHECK(!resource->IsDisposed());
@@ -2585,7 +2602,7 @@ TEST(ReleaseStackTraceData) {
TEST(Regression144230) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -2648,7 +2665,7 @@ TEST(Regression144230) {
TEST(Regress159140) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -2710,7 +2727,7 @@ TEST(Regress159140) {
TEST(Regress165495) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -2758,7 +2775,7 @@ TEST(Regress169209) {
i::FLAG_stress_compaction = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -2844,8 +2861,8 @@ static inline void AllocateAllButNBytes(v8::internal::NewSpace* space,
TEST(Regress169928) {
i::FLAG_allow_natives_syntax = true;
i::FLAG_crankshaft = false;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
// Some flags turn Scavenge collections into Mark-sweep collections
// and hence are incompatible with this test case.
@@ -2917,7 +2934,7 @@ TEST(Regress168801) {
i::FLAG_cache_optimized_code = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -2973,7 +2990,7 @@ TEST(Regress173458) {
i::FLAG_cache_optimized_code = false;
i::FLAG_allow_natives_syntax = true;
i::FLAG_flush_code_incrementally = true;
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
HandleScope scope(isolate);
@@ -3030,7 +3047,7 @@ class DummyVisitor : public ObjectVisitor {
TEST(DeferredHandles) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
Heap* heap = isolate->heap();
v8::HandleScope scope;
diff --git a/deps/v8/test/cctest/test-log-stack-tracer.cc b/deps/v8/test/cctest/test-log-stack-tracer.cc
index c27d5b87e..5bfc1d36b 100644
--- a/deps/v8/test/cctest/test-log-stack-tracer.cc
+++ b/deps/v8/test/cctest/test-log-stack-tracer.cc
@@ -51,13 +51,9 @@ using v8::internal::Address;
using v8::internal::Handle;
using v8::internal::Isolate;
using v8::internal::JSFunction;
-using v8::internal::StackTracer;
using v8::internal::TickSample;
-static v8::Persistent<v8::Context> env;
-
-
static struct {
TickSample* sample;
} trace_env = { NULL };
@@ -73,7 +69,7 @@ static void DoTrace(Address fp) {
// sp is only used to define stack high bound
trace_env.sample->sp =
reinterpret_cast<Address>(trace_env.sample) - 10240;
- StackTracer::Trace(Isolate::Current(), trace_env.sample);
+ trace_env.sample->Trace(Isolate::Current());
}
@@ -185,23 +181,13 @@ static TraceExtension kTraceExtension;
v8::DeclareExtension kTraceExtensionDeclaration(&kTraceExtension);
-static void InitializeVM() {
- if (env.IsEmpty()) {
- const char* extensions[] = { "v8/trace" };
- v8::ExtensionConfiguration config(1, extensions);
- env = v8::Context::New(&config);
- }
- env->Enter();
-}
-
-
static bool IsAddressWithinFuncCode(JSFunction* function, Address addr) {
i::Code* code = function->code();
return code->contains(addr);
}
static bool IsAddressWithinFuncCode(const char* func_name, Address addr) {
- v8::Local<v8::Value> func = env->Global()->Get(v8_str(func_name));
+ v8::Local<v8::Value> func = CcTest::env()->Global()->Get(v8_str(func_name));
CHECK(func->IsFunction());
JSFunction* js_func = JSFunction::cast(*v8::Utils::OpenHandle(*func));
return IsAddressWithinFuncCode(js_func, addr);
@@ -243,7 +229,7 @@ void CreateFramePointerGrabberConstructor(const char* constructor_name) {
v8::FunctionTemplate::New(construct_call);
constructor_template->SetClassName(v8_str("FPGrabber"));
Local<Function> fun = constructor_template->GetFunction();
- env->Global()->Set(v8_str(constructor_name), fun);
+ CcTest::env()->Global()->Set(v8_str(constructor_name), fun);
}
@@ -271,7 +257,7 @@ static void CreateTraceCallerFunction(const char* func_name,
// This test verifies that stack tracing works when called during
// execution of a native function called from JS code. In this case,
-// StackTracer uses Isolate::c_entry_fp as a starting point for stack
+// TickSample::Trace uses Isolate::c_entry_fp as a starting point for stack
// walking.
TEST(CFromJSStackTrace) {
// BUG(1303) Inlining of JSFuncDoTrace() in JSTrace below breaks this test.
@@ -280,8 +266,8 @@ TEST(CFromJSStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM(TRACE_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
// Create global function JSFuncDoTrace which calls
// extension function trace() with the current frame pointer value.
CreateTraceCallerFunction("JSFuncDoTrace", "trace");
@@ -298,9 +284,9 @@ TEST(CFromJSStackTrace) {
// JSFuncDoTrace() [JS] [captures EBP value and encodes it as Smi]
// trace(EBP) [native (extension)]
// DoTrace(EBP) [native]
- // StackTracer::Trace
+ // TickSample::Trace
- CHECK(sample.has_external_callback);
+ CHECK(sample.external_callback);
CHECK_EQ(FUNCTION_ADDR(TraceExtension::Trace), sample.external_callback);
// Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
@@ -313,9 +299,9 @@ TEST(CFromJSStackTrace) {
// This test verifies that stack tracing works when called during
-// execution of JS code. However, as calling StackTracer requires
+// execution of JS code. However, as calling TickSample::Trace requires
// entering native code, we can only emulate pure JS by erasing
-// Isolate::c_entry_fp value. In this case, StackTracer uses passed frame
+// Isolate::c_entry_fp value. In this case, TickSample::Trace uses passed frame
// pointer value as a starting point for stack walking.
TEST(PureJSStackTrace) {
// This test does not pass with inlining enabled since inlined functions
@@ -325,8 +311,8 @@ TEST(PureJSStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM(TRACE_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
// Create global function JSFuncDoTrace which calls
// extension function js_trace() with the current frame pointer value.
CreateTraceCallerFunction("JSFuncDoTrace", "js_trace");
@@ -347,10 +333,10 @@ TEST(PureJSStackTrace) {
// JSFuncDoTrace() [JS]
// js_trace(EBP) [native (extension)]
// DoTraceHideCEntryFPAddress(EBP) [native]
- // StackTracer::Trace
+ // TickSample::Trace
//
- CHECK(sample.has_external_callback);
+ CHECK(sample.external_callback);
CHECK_EQ(FUNCTION_ADDR(TraceExtension::JSTrace), sample.external_callback);
// Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
@@ -387,20 +373,20 @@ static int CFunc(int depth) {
// This test verifies that stack tracing doesn't crash when called on
-// pure native code. StackTracer only unrolls JS code, so we can't
+// pure native code. TickSample::Trace only unrolls JS code, so we can't
// get any meaningful info here.
TEST(PureCStackTrace) {
TickSample sample;
InitTraceEnv(&sample);
- InitializeVM();
+ CcTest::InitializeVM(TRACE_EXTENSION);
// Check that sampler doesn't crash
CHECK_EQ(10, CFunc(10));
}
TEST(JsEntrySp) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM(TRACE_EXTENSION);
+ v8::HandleScope scope(CcTest::isolate());
CHECK_EQ(0, GetJsEntrySp());
CompileRun("a = 1; b = a + 1;");
CHECK_EQ(0, GetJsEntrySp());
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 53ccd3e78..24e5db933 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -62,13 +62,14 @@ class ScopedLoggerInitializer {
// Need to run this prior to creating the scope.
trick_to_run_init_flags_(init_flags_(prof_lazy)),
scope_(v8::Isolate::GetCurrent()),
- env_(v8::Context::New()) {
+ env_(v8::Context::New()),
+ logger_(i::Isolate::Current()->logger()) {
env_->Enter();
}
~ScopedLoggerInitializer() {
env_->Exit();
- LOGGER->TearDown();
+ logger_->TearDown();
if (temp_file_ != NULL) fclose(temp_file_);
i::FLAG_prof_lazy = saved_prof_lazy_;
i::FLAG_prof = saved_prof_;
@@ -78,8 +79,10 @@ class ScopedLoggerInitializer {
v8::Handle<v8::Context>& env() { return env_; }
+ Logger* logger() { return logger_; }
+
FILE* StopLoggingGetTempFile() {
- temp_file_ = LOGGER->TearDown();
+ temp_file_ = logger_->TearDown();
CHECK_NE(NULL, temp_file_);
fflush(temp_file_);
rewind(temp_file_);
@@ -104,6 +107,7 @@ class ScopedLoggerInitializer {
const bool trick_to_run_init_flags_;
v8::HandleScope scope_;
v8::Handle<v8::Context> env_;
+ Logger* logger_;
DISALLOW_COPY_AND_ASSIGN(ScopedLoggerInitializer);
};
@@ -123,12 +127,13 @@ static const char* StrNStr(const char* s1, const char* s2, int n) {
TEST(ProfLazyMode) {
ScopedLoggerInitializer initialize_logger(true);
+ Logger* logger = initialize_logger.logger();
if (!i::V8::UseCrankshaft()) return;
- LOGGER->StringEvent("test-start", "");
+ logger->StringEvent("test-start", "");
CompileRun("var a = (function(x) { return x + 1; })(10);");
- LOGGER->StringEvent("test-profiler-start", "");
+ logger->StringEvent("test-profiler-start", "");
v8::V8::ResumeProfiler();
CompileRun(
"var b = (function(x) { return x + 2; })(10);\n"
@@ -136,10 +141,10 @@ TEST(ProfLazyMode) {
"var d = (function(x) { return x + 4; })(10);\n"
"var e = (function(x) { return x + 5; })(10);");
v8::V8::PauseProfiler();
- LOGGER->StringEvent("test-profiler-stop", "");
+ logger->StringEvent("test-profiler-stop", "");
CompileRun("var f = (function(x) { return x + 6; })(10);");
// Check that profiling can be resumed again.
- LOGGER->StringEvent("test-profiler-start-2", "");
+ logger->StringEvent("test-profiler-start-2", "");
v8::V8::ResumeProfiler();
CompileRun(
"var g = (function(x) { return x + 7; })(10);\n"
@@ -147,8 +152,8 @@ TEST(ProfLazyMode) {
"var i = (function(x) { return x + 9; })(10);\n"
"var j = (function(x) { return x + 10; })(10);");
v8::V8::PauseProfiler();
- LOGGER->StringEvent("test-profiler-stop-2", "");
- LOGGER->StringEvent("test-stop", "");
+ logger->StringEvent("test-profiler-stop-2", "");
+ logger->StringEvent("test-stop", "");
bool exists = false;
i::Vector<const char> log(
@@ -383,7 +388,7 @@ TEST(Issue23768) {
i_source->set_resource(NULL);
// Must not crash.
- LOGGER->LogCompiledFunctions();
+ i::Isolate::Current()->logger()->LogCompiledFunctions();
}
@@ -393,6 +398,7 @@ static v8::Handle<v8::Value> ObjMethod1(const v8::Arguments& args) {
TEST(LogCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
+ Logger* logger = initialize_logger.logger();
v8::Persistent<v8::FunctionTemplate> obj =
v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
@@ -409,7 +415,7 @@ TEST(LogCallbacks) {
initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
CompileRun("Obj.prototype.method1.toString();");
- LOGGER->LogCompiledFunctions();
+ logger->LogCompiledFunctions();
bool exists = false;
i::Vector<const char> log(
@@ -444,6 +450,7 @@ static v8::Handle<v8::Value> Prop2Getter(v8::Local<v8::String> property,
TEST(LogAccessorCallbacks) {
ScopedLoggerInitializer initialize_logger(false);
+ Logger* logger = initialize_logger.logger();
v8::Persistent<v8::FunctionTemplate> obj =
v8::Persistent<v8::FunctionTemplate>::New(v8::Isolate::GetCurrent(),
@@ -453,7 +460,7 @@ TEST(LogAccessorCallbacks) {
inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
inst->SetAccessor(v8_str("prop2"), Prop2Getter);
- LOGGER->LogAccessorCallbacks();
+ logger->LogAccessorCallbacks();
bool exists = false;
i::Vector<const char> log(
@@ -487,12 +494,13 @@ TEST(LogAccessorCallbacks) {
TEST(IsLoggingPreserved) {
ScopedLoggerInitializer initialize_logger(false);
+ Logger* logger = initialize_logger.logger();
- CHECK(LOGGER->is_logging());
- LOGGER->ResumeProfiler();
- CHECK(LOGGER->is_logging());
- LOGGER->PauseProfiler();
- CHECK(LOGGER->is_logging());
+ CHECK(logger->is_logging());
+ logger->ResumeProfiler();
+ CHECK(logger->is_logging());
+ logger->PauseProfiler();
+ CHECK(logger->is_logging());
}
@@ -513,6 +521,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
// Start with profiling to capture all code events from the beginning.
ScopedLoggerInitializer initialize_logger(false);
+ Logger* logger = initialize_logger.logger();
// Compile and run a function that creates other functions.
CompileRun(
@@ -522,11 +531,11 @@ TEST(EquivalenceOfLoggingAndTraversal) {
"})(this);");
v8::V8::PauseProfiler();
HEAP->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
- LOGGER->StringEvent("test-logging-done", "");
+ logger->StringEvent("test-logging-done", "");
// Iterate heap to find compiled functions, will write to log.
- LOGGER->LogCompiledFunctions();
- LOGGER->StringEvent("test-traversal-done", "");
+ logger->LogCompiledFunctions();
+ logger->StringEvent("test-traversal-done", "");
bool exists = false;
i::Vector<const char> log(
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 4640599aa..db8c3e403 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -43,16 +43,9 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) env = v8::Context::New();
- env->Enter();
-}
-
TEST(MarkingDeque) {
- InitializeVM();
+ CcTest::InitializeVM();
int mem_size = 20 * kPointerSize;
byte* mem = NewArray<byte>(20*kPointerSize);
Address low = reinterpret_cast<Address>(mem);
@@ -89,9 +82,9 @@ TEST(Promotion) {
FLAG_always_compact = true;
HEAP->ConfigureHeap(2*256*KB, 8*MB, 8*MB);
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
// Allocate a fixed array in the new space.
int array_size =
@@ -117,9 +110,9 @@ TEST(NoPromotion) {
// Test the situation that some objects in new space are promoted to
// the old space
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
// Do a mark compact GC to shrink the heap.
HEAP->CollectGarbage(OLD_POINTER_SPACE);
@@ -155,9 +148,9 @@ TEST(NoPromotion) {
TEST(MarkCompactCollector) {
- InitializeVM();
+ CcTest::InitializeVM();
- v8::HandleScope sc(env->GetIsolate());
+ v8::HandleScope sc(CcTest::isolate());
// call mark-compact when heap is empty
HEAP->CollectGarbage(OLD_POINTER_SPACE);
@@ -248,7 +241,7 @@ static Handle<Map> CreateMap() {
TEST(MapCompact) {
FLAG_max_map_space_pages = 16;
- InitializeVM();
+ CcTest::InitializeVM();
{
v8::HandleScope sc;
@@ -287,7 +280,7 @@ static void GCEpilogueCallbackFunc() {
TEST(GCCallback) {
- InitializeVM();
+ CcTest::InitializeVM();
HEAP->SetGlobalGCPrologueCallback(&GCPrologueCallbackFunc);
HEAP->SetGlobalGCEpilogueCallback(&GCEpilogueCallbackFunc);
@@ -315,11 +308,11 @@ static void WeakPointerCallback(v8::Isolate* isolate,
TEST(ObjectGroups) {
FLAG_incremental_marking = false;
- InitializeVM();
+ CcTest::InitializeVM();
GlobalHandles* global_handles = Isolate::Current()->global_handles();
NumberOfWeakCalls = 0;
- v8::HandleScope handle_scope(env->GetIsolate());
+ v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> g1s1 =
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
@@ -452,10 +445,10 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
TEST(EmptyObjectGroups) {
- InitializeVM();
+ CcTest::InitializeVM();
GlobalHandles* global_handles = Isolate::Current()->global_handles();
- v8::HandleScope handle_scope(env->GetIsolate());
+ v8::HandleScope handle_scope(CcTest::isolate());
Handle<Object> object =
global_handles->Create(HEAP->AllocateFixedArray(1)->ToObjectChecked());
@@ -552,7 +545,7 @@ TEST(BootUpMemoryUse) {
// Only Linux has the proc filesystem and only if it is mapped. If it's not
// there we just skip the test.
if (initial_memory >= 0) {
- InitializeVM();
+ CcTest::InitializeVM();
intptr_t delta = MemoryInUse() - initial_memory;
printf("delta: %" V8_PTR_PREFIX "d kB\n", delta / 1024);
if (sizeof(initial_memory) == 8) { // 64-bit.
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index c8956c9eb..c7331bdb9 100644
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -83,7 +83,7 @@ TEST(ScanKeywords) {
// Adding characters will make keyword matching fail.
static const char chars_to_append[] = { 'z', '0', '_' };
for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
- memmove(buffer, keyword, length);
+ i::OS::MemMove(buffer, keyword, length);
buffer[length] = chars_to_append[j];
i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
i::Scanner scanner(&unicode_cache);
@@ -93,7 +93,7 @@ TEST(ScanKeywords) {
}
// Replacing characters will make keyword matching fail.
{
- memmove(buffer, keyword, length);
+ i::OS::MemMove(buffer, keyword, length);
buffer[length - 1] = '_';
i::Utf8ToUtf16CharacterStream stream(buffer, length);
i::Scanner scanner(&unicode_cache);
@@ -262,12 +262,11 @@ TEST(StandAlonePreParser) {
i::Scanner scanner(i::Isolate::Current()->unicode_cache());
scanner.Initialize(&stream);
- int flags = i::kAllowLazy | i::kAllowNativesSyntax;
+ v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ preparser.set_allow_lazy(true);
+ preparser.set_allow_natives_syntax(true);
v8::preparser::PreParser::PreParseResult result =
- v8::preparser::PreParser::PreParseProgram(&scanner,
- &log,
- flags,
- stack_limit);
+ preparser.PreParseProgram();
CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
i::ScriptDataImpl data(log.ExtractData());
CHECK(!data.has_error());
@@ -298,12 +297,11 @@ TEST(StandAlonePreParserNoNatives) {
i::Scanner scanner(i::Isolate::Current()->unicode_cache());
scanner.Initialize(&stream);
- // Flags don't allow natives syntax.
+ // Preparser defaults to disallowing natives syntax.
+ v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ preparser.set_allow_lazy(true);
v8::preparser::PreParser::PreParseResult result =
- v8::preparser::PreParser::PreParseProgram(&scanner,
- &log,
- i::kAllowLazy,
- stack_limit);
+ preparser.PreParseProgram();
CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
i::ScriptDataImpl data(log.ExtractData());
// Data contains syntax error.
@@ -329,8 +327,7 @@ TEST(RegressChromium62639) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const i::byte*>(program),
static_cast<unsigned>(strlen(program)));
- i::ScriptDataImpl* data =
- i::ParserApi::PreParse(&stream, NULL, false);
+ i::ScriptDataImpl* data = i::PreParserApi::PreParse(&stream);
CHECK(data->HasError());
delete data;
}
@@ -355,7 +352,7 @@ TEST(Regress928) {
i::Handle<i::String> source(
FACTORY->NewStringFromAscii(i::CStrVector(program)));
i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
- i::ScriptDataImpl* data = i::ParserApi::PreParse(&stream, NULL, false);
+ i::ScriptDataImpl* data = i::PreParserApi::PreParse(&stream);
CHECK(!data->HasError());
data->Initialize();
@@ -401,12 +398,10 @@ TEST(PreParseOverflow) {
i::Scanner scanner(i::Isolate::Current()->unicode_cache());
scanner.Initialize(&stream);
-
+ v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ preparser.set_allow_lazy(true);
v8::preparser::PreParser::PreParseResult result =
- v8::preparser::PreParser::PreParseProgram(&scanner,
- &log,
- true,
- stack_limit);
+ preparser.PreParseProgram();
CHECK_EQ(v8::preparser::PreParser::kPreParseStackOverflow, result);
}
@@ -995,7 +990,6 @@ TEST(ScopePositions) {
int marker;
i::Isolate::Current()->stack_guard()->SetStackLimit(
reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
- i::FLAG_harmony_scoping = true;
for (int i = 0; source_data[i].outer_prefix; i++) {
int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
@@ -1018,7 +1012,9 @@ TEST(ScopePositions) {
CHECK_EQ(source->length(), kProgramSize);
i::Handle<i::Script> script = FACTORY->NewScript(source);
i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, i::kAllowLazy | i::EXTENDED_MODE, NULL, NULL);
+ i::Parser parser(&info);
+ parser.set_allow_lazy(true);
+ parser.set_allow_harmony_scoping(true);
info.MarkAsGlobal();
info.SetLanguageMode(source_data[i].language_mode);
i::FunctionLiteral* function = parser.ParseProgram();
@@ -1066,31 +1062,57 @@ i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
}
-void TestParserSync(i::Handle<i::String> source, int flags) {
+enum ParserFlag {
+ kAllowLazy,
+ kAllowNativesSyntax,
+ kAllowHarmonyScoping,
+ kAllowModules,
+ kAllowGenerators,
+ kParserFlagCount
+};
+
+
+static bool checkParserFlag(unsigned flags, ParserFlag flag) {
+ return flags & (1 << flag);
+}
+
+
+#define SET_PARSER_FLAGS(parser, flags) \
+ parser.set_allow_lazy(checkParserFlag(flags, kAllowLazy)); \
+ parser.set_allow_natives_syntax(checkParserFlag(flags, \
+ kAllowNativesSyntax)); \
+ parser.set_allow_harmony_scoping(checkParserFlag(flags, \
+ kAllowHarmonyScoping)); \
+ parser.set_allow_modules(checkParserFlag(flags, kAllowModules)); \
+ parser.set_allow_generators(checkParserFlag(flags, kAllowGenerators));
+
+void TestParserSyncWithFlags(i::Handle<i::String> source, unsigned flags) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
- bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
// Preparse the data.
i::CompleteParserRecorder log;
- i::Scanner scanner(i::Isolate::Current()->unicode_cache());
- i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
- scanner.SetHarmonyScoping(harmony_scoping);
- scanner.Initialize(&stream);
- v8::preparser::PreParser::PreParseResult result =
- v8::preparser::PreParser::PreParseProgram(
- &scanner, &log, flags, stack_limit);
- CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ {
+ i::Scanner scanner(i::Isolate::Current()->unicode_cache());
+ i::GenericStringUtf16CharacterStream stream(source, 0, source->length());
+ v8::preparser::PreParser preparser(&scanner, &log, stack_limit);
+ SET_PARSER_FLAGS(preparser, flags);
+ scanner.Initialize(&stream);
+ v8::preparser::PreParser::PreParseResult result =
+ preparser.PreParseProgram();
+ CHECK_EQ(v8::preparser::PreParser::kPreParseSuccess, result);
+ }
i::ScriptDataImpl data(log.ExtractData());
// Parse the data
- i::Handle<i::Script> script = FACTORY->NewScript(source);
- bool save_harmony_scoping = i::FLAG_harmony_scoping;
- i::FLAG_harmony_scoping = harmony_scoping;
- i::CompilationInfoWithZone info(script);
- i::Parser parser(&info, flags, NULL, NULL);
- info.MarkAsGlobal();
- i::FunctionLiteral* function = parser.ParseProgram();
- i::FLAG_harmony_scoping = save_harmony_scoping;
+ i::FunctionLiteral* function;
+ {
+ i::Handle<i::Script> script = FACTORY->NewScript(source);
+ i::CompilationInfoWithZone info(script);
+ i::Parser parser(&info);
+ SET_PARSER_FLAGS(parser, flags);
+ info.MarkAsGlobal();
+ function = parser.ParseProgram();
+ }
// Check that preparsing fails iff parsing fails.
if (function == NULL) {
@@ -1140,19 +1162,9 @@ void TestParserSync(i::Handle<i::String> source, int flags) {
}
-void TestParserSyncWithFlags(i::Handle<i::String> source) {
- static const int kFlagsCount = 6;
- const int flags[kFlagsCount] = {
- i::kNoParsingFlags | i::CLASSIC_MODE,
- i::kNoParsingFlags | i::STRICT_MODE,
- i::kNoParsingFlags | i::EXTENDED_MODE,
- i::kAllowLazy | i::CLASSIC_MODE,
- i::kAllowLazy | i::STRICT_MODE,
- i::kAllowLazy | i::EXTENDED_MODE
- };
-
- for (int k = 0; k < kFlagsCount; ++k) {
- TestParserSync(source, flags[k]);
+void TestParserSync(i::Handle<i::String> source) {
+ for (unsigned flags = 0; flags < (1 << kParserFlagCount); ++flags) {
+ TestParserSyncWithFlags(source, flags);
}
}
@@ -1255,7 +1267,7 @@ TEST(ParserSync) {
CHECK(length == kProgramSize);
i::Handle<i::String> source =
FACTORY->NewStringFromAscii(i::CStrVector(program.start()));
- TestParserSyncWithFlags(source);
+ TestParserSync(source);
}
}
}
diff --git a/deps/v8/test/cctest/test-platform-win32.cc b/deps/v8/test/cctest/test-platform-win32.cc
index 1f96ce687..a5089d360 100644
--- a/deps/v8/test/cctest/test-platform-win32.cc
+++ b/deps/v8/test/cctest/test-platform-win32.cc
@@ -33,6 +33,7 @@
#include "platform.h"
#include "cctest.h"
+#include "win32-headers.h"
using namespace ::v8::internal;
diff --git a/deps/v8/test/cctest/test-profile-generator.cc b/deps/v8/test/cctest/test-profile-generator.cc
index 56b1788a8..0682fbcdb 100644
--- a/deps/v8/test/cctest/test-profile-generator.cc
+++ b/deps/v8/test/cctest/test-profile-generator.cc
@@ -624,13 +624,11 @@ TEST(RecordTickSample) {
// -> ccc -> aaa - sample3
TickSample sample1;
sample1.pc = ToAddress(0x1600);
- sample1.tos = ToAddress(0x1500);
sample1.stack[0] = ToAddress(0x1510);
sample1.frames_count = 1;
generator.RecordTickSample(sample1);
TickSample sample2;
sample2.pc = ToAddress(0x1925);
- sample2.tos = ToAddress(0x1900);
sample2.stack[0] = ToAddress(0x1780);
sample2.stack[1] = ToAddress(0x10000); // non-existent.
sample2.stack[2] = ToAddress(0x1620);
@@ -638,7 +636,6 @@ TEST(RecordTickSample) {
generator.RecordTickSample(sample2);
TickSample sample3;
sample3.pc = ToAddress(0x1510);
- sample3.tos = ToAddress(0x1500);
sample3.stack[0] = ToAddress(0x1910);
sample3.stack[1] = ToAddress(0x1610);
sample3.frames_count = 2;
diff --git a/deps/v8/test/cctest/test-strings.cc b/deps/v8/test/cctest/test-strings.cc
index 9aebdb18f..2889172f5 100644
--- a/deps/v8/test/cctest/test-strings.cc
+++ b/deps/v8/test/cctest/test-strings.cc
@@ -95,18 +95,6 @@ class RandomNumberGenerator {
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- const char* extensions[] = { "v8/print" };
- v8::ExtensionConfiguration config(1, extensions);
- env = v8::Context::New(&config);
- }
- env->Enter();
-}
-
static const int DEEP_DEPTH = 8 * 1024;
static const int SUPER_DEEP_DEPTH = 80 * 1024;
@@ -574,8 +562,8 @@ static void TraverseFirst(Handle<String> s1, Handle<String> s2, int chars) {
TEST(Traverse) {
printf("TestTraverse\n");
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
ConsStringGenerationData data(false);
Handle<String> flat = ConstructBalanced(&data);
@@ -663,7 +651,7 @@ printf(
template<typename BuildString>
void TestStringCharacterStream(BuildString build, int test_cases) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope outer_scope(isolate);
ZoneScope zone(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
@@ -863,8 +851,8 @@ static const int DEEP_ASCII_DEPTH = 100000;
TEST(DeepAscii) {
printf("TestDeepAscii\n");
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
char* foo = NewArray<char>(DEEP_ASCII_DEPTH);
for (int i = 0; i < DEEP_ASCII_DEPTH; i++) {
@@ -888,8 +876,8 @@ TEST(DeepAscii) {
TEST(Utf8Conversion) {
// Smoke test for converting strings to utf-8.
- InitializeVM();
- v8::HandleScope handle_scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope handle_scope(CcTest::isolate());
// A simple ascii string
const char* ascii_string = "abcdef12345";
int len =
@@ -935,8 +923,8 @@ TEST(Utf8Conversion) {
TEST(ExternalShortStringAdd) {
ZoneScope zonescope(Isolate::Current()->runtime_zone(), DELETE_ON_EXIT);
- InitializeVM();
- v8::HandleScope handle_scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope handle_scope(CcTest::isolate());
Zone* zone = Isolate::Current()->runtime_zone();
// Make sure we cover all always-flat lengths and at least one above.
@@ -977,7 +965,7 @@ TEST(ExternalShortStringAdd) {
}
// Add the arrays with the short external strings in the global object.
- v8::Handle<v8::Object> global = env->Global();
+ v8::Handle<v8::Object> global = CcTest::env()->Global();
global->Set(v8_str("external_ascii"), ascii_external_strings);
global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
global->Set(v8_str("max_length"), v8::Integer::New(kMaxLength));
@@ -1027,8 +1015,8 @@ TEST(CachedHashOverflow) {
Isolate* isolate = Isolate::Current();
ZoneScope zone(isolate->runtime_zone(), DELETE_ON_EXIT);
- InitializeVM();
- v8::HandleScope handle_scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope handle_scope(CcTest::isolate());
// Lines must be executed sequentially. Combining them into one script
// makes the bug go away.
const char* lines[] = {
@@ -1070,8 +1058,8 @@ TEST(CachedHashOverflow) {
TEST(SliceFromCons) {
FLAG_string_slices = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
Handle<String> string =
FACTORY->NewStringFromAscii(CStrVector("parentparentparent"));
Handle<String> parent = FACTORY->NewConsString(string, string);
@@ -1104,8 +1092,8 @@ class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
TEST(SliceFromExternal) {
FLAG_string_slices = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
AsciiVectorResource resource(
i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
Handle<String> string = FACTORY->NewExternalStringFromAscii(&resource);
@@ -1123,8 +1111,8 @@ TEST(TrivialSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
FLAG_string_slices = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
Handle<String> string;
const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
@@ -1152,8 +1140,8 @@ TEST(SliceFromSlice) {
// This tests whether a slice that contains the entire parent string
// actually creates a new string (it should not).
FLAG_string_slices = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
Handle<String> string;
const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
@@ -1220,8 +1208,8 @@ TEST(RobustSubStringStub) {
// This tests whether the SubStringStub can handle unsafe arguments.
// If not recognized, those unsafe arguments lead to out-of-bounds reads.
FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
v8::Local<v8::Value> result;
Handle<String> string;
CompileRun("var short = 'abcdef';");
@@ -1264,8 +1252,8 @@ TEST(RobustSubStringStub) {
TEST(RegExpOverflow) {
// Result string has the length 2^32, causing a 32-bit integer overflow.
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8::V8::IgnoreOutOfMemoryException();
v8::Local<v8::Value> result = CompileRun(
@@ -1280,8 +1268,8 @@ TEST(RegExpOverflow) {
TEST(StringReplaceAtomTwoByteResult) {
- InitializeVM();
- v8::HandleScope scope(env->GetIsolate());
+ CcTest::InitializeVM();
+ v8::HandleScope scope(CcTest::isolate());
LocalContext context;
v8::Local<v8::Value> result = CompileRun(
"var subject = 'ascii~only~string~'; "
diff --git a/deps/v8/test/cctest/test-symbols.cc b/deps/v8/test/cctest/test-symbols.cc
index e95345742..6a8323bea 100644
--- a/deps/v8/test/cctest/test-symbols.cc
+++ b/deps/v8/test/cctest/test-symbols.cc
@@ -12,20 +12,9 @@
using namespace v8::internal;
-static v8::Persistent<v8::Context> env;
-
-static void InitializeVM() {
- if (env.IsEmpty()) {
- const char* extensions[] = { "v8/print" };
- v8::ExtensionConfiguration config(1, extensions);
- env = v8::Context::New(&config);
- }
- env->Enter();
-}
-
TEST(Create) {
- InitializeVM();
+ CcTest::InitializeVM();
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
diff --git a/deps/v8/test/cctest/test-utils.cc b/deps/v8/test/cctest/test-utils.cc
index c83acb909..541c42338 100644
--- a/deps/v8/test/cctest/test-utils.cc
+++ b/deps/v8/test/cctest/test-utils.cc
@@ -98,57 +98,68 @@ TEST(SNPrintF) {
}
-void TestMemCopy(Vector<byte> src,
- Vector<byte> dst,
- int source_alignment,
- int destination_alignment,
- int length_alignment) {
- memset(dst.start(), 0xFF, dst.length());
- byte* to = dst.start() + 32 + destination_alignment;
- byte* from = src.start() + source_alignment;
- int length = OS::kMinComplexMemCopy + length_alignment;
- OS::MemCopy(to, from, static_cast<size_t>(length));
- printf("[%d,%d,%d]\n",
- source_alignment, destination_alignment, length_alignment);
- for (int i = 0; i < length; i++) {
- CHECK_EQ(from[i], to[i]);
+static const int kAreaSize = 512;
+
+
+void TestMemMove(byte* area1,
+ byte* area2,
+ byte* area3,
+ int src_offset,
+ int dest_offset,
+ int length) {
+ for (int i = 0; i < kAreaSize; i++) {
+ area1[i] = i & 0xFF;
+ area2[i] = i & 0xFF;
+ area3[i] = i & 0xFF;
+ }
+ OS::MemMove(area1 + dest_offset, area1 + src_offset, length);
+ MoveBytes(area2 + dest_offset, area2 + src_offset, length);
+ memmove(area3 + dest_offset, area3 + src_offset, length);
+ if (memcmp(area1, area3, kAreaSize) != 0) {
+ printf("OS::MemMove(): src_offset: %d, dest_offset: %d, length: %d\n",
+ src_offset, dest_offset, length);
+ for (int i = 0; i < kAreaSize; i++) {
+ if (area1[i] == area3[i]) continue;
+ printf("diff at offset %d (%p): is %d, should be %d\n",
+ i, reinterpret_cast<void*>(area1 + i), area1[i], area3[i]);
+ }
+ CHECK(false);
+ }
+ if (memcmp(area2, area3, kAreaSize) != 0) {
+ printf("MoveBytes(): src_offset: %d, dest_offset: %d, length: %d\n",
+ src_offset, dest_offset, length);
+ for (int i = 0; i < kAreaSize; i++) {
+ if (area2[i] == area3[i]) continue;
+ printf("diff at offset %d (%p): is %d, should be %d\n",
+ i, reinterpret_cast<void*>(area2 + i), area2[i], area3[i]);
+ }
+ CHECK(false);
}
- CHECK_EQ(0xFF, to[-1]);
- CHECK_EQ(0xFF, to[length]);
}
-
-TEST(MemCopy) {
+TEST(MemMove) {
v8::V8::Initialize();
OS::SetUp();
- const int N = OS::kMinComplexMemCopy + 128;
- Vector<byte> buffer1 = Vector<byte>::New(N);
- Vector<byte> buffer2 = Vector<byte>::New(N);
-
- for (int i = 0; i < N; i++) {
- buffer1[i] = static_cast<byte>(i & 0x7F);
- }
-
- // Same alignment.
- for (int i = 0; i < 32; i++) {
- TestMemCopy(buffer1, buffer2, i, i, i * 2);
- }
-
- // Different alignment.
- for (int i = 0; i < 32; i++) {
- for (int j = 1; j < 32; j++) {
- TestMemCopy(buffer1, buffer2, i, (i + j) & 0x1F , 0);
+ byte* area1 = new byte[kAreaSize];
+ byte* area2 = new byte[kAreaSize];
+ byte* area3 = new byte[kAreaSize];
+
+ static const int kMinOffset = 32;
+ static const int kMaxOffset = 64;
+ static const int kMaxLength = 128;
+ STATIC_ASSERT(kMaxOffset + kMaxLength < kAreaSize);
+
+ for (int src_offset = kMinOffset; src_offset <= kMaxOffset; src_offset++) {
+ for (int dst_offset = kMinOffset; dst_offset <= kMaxOffset; dst_offset++) {
+ for (int length = 0; length <= kMaxLength; length++) {
+ TestMemMove(area1, area2, area3, src_offset, dst_offset, length);
+ }
}
}
-
- // Different lengths
- for (int i = 0; i < 32; i++) {
- TestMemCopy(buffer1, buffer2, 3, 7, i);
- }
-
- buffer2.Dispose();
- buffer1.Dispose();
+ delete[] area1;
+ delete[] area2;
+ delete[] area3;
}
diff --git a/deps/v8/test/cctest/testcfg.py b/deps/v8/test/cctest/testcfg.py
index 86dc74057..4c2cd231c 100644
--- a/deps/v8/test/cctest/testcfg.py
+++ b/deps/v8/test/cctest/testcfg.py
@@ -57,6 +57,10 @@ class CcTestSuite(testsuite.TestSuite):
return []
tests = []
for test_desc in output.stdout.strip().split():
+ if test_desc.find('<') < 0:
+ # Native Client output can contain a few non-test arguments
+ # before the tests. Skip these.
+ continue
raw_test, dependency = test_desc.split('<')
if dependency != '':
dependency = raw_test.split('/')[0] + '/' + dependency
diff --git a/deps/v8/test/message/testcfg.py b/deps/v8/test/message/testcfg.py
index 1b788d561..dba65a9a2 100644
--- a/deps/v8/test/message/testcfg.py
+++ b/deps/v8/test/message/testcfg.py
@@ -74,7 +74,13 @@ class MessageTestSuite(testsuite.TestSuite):
"""Ignore empty lines, valgrind output and Android output."""
if not string: return True
return (string.startswith("==") or string.startswith("**") or
- string.startswith("ANDROID"))
+ string.startswith("ANDROID") or
+ # These five patterns appear in normal Native Client output.
+ string.startswith("DEBUG MODE ENABLED") or
+ string.startswith("tools/nacl-run.py") or
+ string.find("BYPASSING ALL ACL CHECKS") > 0 or
+ string.find("Native Client module will be loaded") > 0 or
+ string.find("NaClHostDescOpen:") > 0)
def IsFailureOutput(self, output, testpath):
expected_path = os.path.join(self.root, testpath + ".out")
diff --git a/deps/v8/test/mjsunit/allocation-site-info.js b/deps/v8/test/mjsunit/allocation-site-info.js
index d57fd321e..4d534e303 100644
--- a/deps/v8/test/mjsunit/allocation-site-info.js
+++ b/deps/v8/test/mjsunit/allocation-site-info.js
@@ -144,7 +144,9 @@ if (support_smi_only_arrays) {
obj = fastliteralcase(get_standard_literal(), 1.5);
assertKind(elements_kind.fast_double, obj);
obj = fastliteralcase(get_standard_literal(), 2);
- assertKind(elements_kind.fast_double, obj);
+ // TODO(hpayer): bring the following assert back as soon as allocation
+ // sites work again for fast literals
+ //assertKind(elements_kind.fast_double, obj);
obj = fastliteralcase([5, 3, 2], 1.5);
assertKind(elements_kind.fast_double, obj);
@@ -173,7 +175,9 @@ if (support_smi_only_arrays) {
obj = fastliteralcase_smifast("carter");
assertKind(elements_kind.fast, obj);
obj = fastliteralcase_smifast(2);
- assertKind(elements_kind.fast, obj);
+ // TODO(hpayer): bring the following assert back as soon as allocation
+ // sites work again for fast literals
+ //assertKind(elements_kind.fast, obj);
if (optimize_constructed_arrays) {
function newarraycase_smidouble(value) {
diff --git a/deps/v8/test/mjsunit/bugs/bug-2615.js b/deps/v8/test/mjsunit/bugs/bug-2615.js
new file mode 100644
index 000000000..51aeaf492
--- /dev/null
+++ b/deps/v8/test/mjsunit/bugs/bug-2615.js
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var a = [];
+a[0xfffffffe] = 10;
+assertThrows("a.unshift(1);", RangeError);
+assertEquals(0xffffffff, a.length);
+assertEquals(10, a[0xffffffff]);
+assertEquals(undefined, a[0xfffffffe]);
+
+a = [1,2,3];
+a[0xfffffffe] = 10;
+assertThrows("a.splice(1,1,7,7,7,7,7);", RangeError);
+assertEquals([1,7,7,7,7,7,3], a.slice(0, 7));
+assertEquals(0xffffffff, a.length);
+assertEquals(10, a[0xfffffffe + 5 - 1]);
+
+a = [1];
+Object.defineProperty(a, "1", {writable:false, configurable:false, value: 100});
+assertThrows("a.unshift(4);", TypeError);
+assertEquals([1, 100, 100], a);
+var desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.writable);
+assertEquals(false, desc.configurable);
+
+a = [1];
+var g = function() { return 100; };
+Object.defineProperty(a, "1", {get:g});
+assertThrows("a.unshift(0);", TypeError);
+assertEquals([1, 100, 100], a);
+desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.configurable);
+assertEquals(g, desc.get);
+
+a = [1];
+var c = 0;
+var s = function(v) { c += 1; };
+Object.defineProperty(a, "1", {set:s});
+a.unshift(10);
+assertEquals([10, undefined, undefined], a);
+assertEquals(1, c);
+desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.configurable);
+assertEquals(s, desc.set);
+
+a = [1];
+Object.defineProperty(a, "1", {configurable:false, value:10});
+assertThrows("a.splice(1,1);", TypeError);
+assertEquals([1, 10], a);
+desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.configurable);
+
+a = [0,1,2,3,4,5,6];
+Object.defineProperty(a, "3", {configurable:false, writable:false, value:3});
+assertThrows("a.splice(1,4);", TypeError);
+assertEquals([0,5,6,3,,,,,], a);
+desc = Object.getOwnPropertyDescriptor(a, "3");
+assertEquals(false, desc.configurable);
+assertEquals(false, desc.writable);
+
+a = [0,1,2,3,4,5,6];
+Object.defineProperty(a, "5", {configurable:false, value:5});
+assertThrows("a.splice(1,4);", TypeError);
+assertEquals([0,5,6,3,4,5,,,], a);
+desc = Object.getOwnPropertyDescriptor(a, "5");
+assertEquals(false, desc.configurable);
+
+a = [1,2,3,,5];
+Object.defineProperty(a, "1", {configurable:false, writable:true, value:2});
+assertEquals(1, a.shift());
+assertEquals([2,3,,5], a);
+desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.configurable);
+assertEquals(true, desc.writable);
+assertThrows("a.shift();", TypeError);
+assertEquals([3,3,,5], a);
+desc = Object.getOwnPropertyDescriptor(a, "1");
+assertEquals(false, desc.configurable);
+assertEquals(true, desc.writable);
+
+a = [1,2,3];
+Object.defineProperty(a, "2", {configurable:false, value:3});
+assertThrows("a.pop();", TypeError);
+assertEquals([1,2,3], a);
+desc = Object.getOwnPropertyDescriptor(a, "2");
+assertEquals(false, desc.configurable);
+
+a = [1,2,,,5];
+Object.defineProperty(a, "4", {writable:true, configurable:false, value:5});
+assertThrows("a.sort();", TypeError);
+assertEquals([1,2,5,,5], a);
+desc = Object.getOwnPropertyDescriptor(a, "2");
+assertEquals(true, desc.configurable);
+desc = Object.getOwnPropertyDescriptor(a, "4");
+assertEquals(false, desc.configurable);
+
+a = [1,2,3,,5,6];
+Object.defineProperty(a, "4", {value:5, writable:false});
+assertThrows("a.sort();", TypeError);
+assertEquals([1,2,3,5,5,6], a);
+desc = Object.getOwnPropertyDescriptor(a, "4");
+assertEquals(false, desc.writable);
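
The RangeError cases above all rest on the same rule: an ordinary Array's length is capped at 2^32 - 1. A minimal sketch of that cap in plain JavaScript, outside the mjsunit harness and assuming console.log for output:

// Sketch only: the array length cap the bug-2615 cases depend on.
var arr = [];
try {
  arr.length = 0x100000000;          // 2^32 exceeds the maximum array length
} catch (e) {
  console.log(e instanceof RangeError);  // true
}
arr[0xfffffffe] = 10;
console.log(arr.length);             // 4294967295, the largest possible length
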
diff --git a/deps/v8/test/mjsunit/builtins.js b/deps/v8/test/mjsunit/builtins.js
index 062cfd568..ce2c6802f 100644
--- a/deps/v8/test/mjsunit/builtins.js
+++ b/deps/v8/test/mjsunit/builtins.js
@@ -54,7 +54,9 @@ function checkConstructor(func, name) {
assertFalse(proto_desc.writable, name);
assertFalse(proto_desc.configurable, name);
var prototype = proto_desc.value;
- assertEquals(null, Object.getPrototypeOf(prototype), name);
+ assertEquals(name == "GeneratorFunctionPrototype" ? Object.prototype : null,
+ Object.getPrototypeOf(prototype),
+ name);
for (var i = 0; i < propNames.length; i++) {
var propName = propNames[i];
if (propName == "constructor") continue;
diff --git a/deps/v8/test/mjsunit/external-array-no-sse2.js b/deps/v8/test/mjsunit/external-array-no-sse2.js
new file mode 100644
index 000000000..0b843d865
--- /dev/null
+++ b/deps/v8/test/mjsunit/external-array-no-sse2.js
@@ -0,0 +1,716 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --expose-gc --noenable-sse2
+
+// Helper
+function assertInstance(o, f) {
+ assertSame(o.constructor, f);
+ assertInstanceof(o, f);
+}
+
+// This is a regression test for overlapping key and value registers.
+function f(a) {
+ a[0] = 0;
+ a[1] = 0;
+}
+
+var a = new Int32Array(2);
+for (var i = 0; i < 5; i++) {
+ f(a);
+}
+%OptimizeFunctionOnNextCall(f);
+f(a);
+
+assertEquals(0, a[0]);
+assertEquals(0, a[1]);
+
+// No-parameter constructor should fail right now.
+function abfunc1() {
+ return new ArrayBuffer();
+}
+assertThrows(abfunc1);
+
+// Test derivation from an ArrayBuffer
+var ab = new ArrayBuffer(12);
+assertInstance(ab, ArrayBuffer);
+var derived_uint8 = new Uint8Array(ab);
+assertInstance(derived_uint8, Uint8Array);
+assertSame(ab, derived_uint8.buffer);
+assertEquals(12, derived_uint8.length);
+assertEquals(12, derived_uint8.byteLength);
+assertEquals(0, derived_uint8.byteOffset);
+assertEquals(1, derived_uint8.BYTES_PER_ELEMENT);
+var derived_uint8_2 = new Uint8Array(ab,7);
+assertInstance(derived_uint8_2, Uint8Array);
+assertSame(ab, derived_uint8_2.buffer);
+assertEquals(5, derived_uint8_2.length);
+assertEquals(5, derived_uint8_2.byteLength);
+assertEquals(7, derived_uint8_2.byteOffset);
+assertEquals(1, derived_uint8_2.BYTES_PER_ELEMENT);
+var derived_int16 = new Int16Array(ab);
+assertInstance(derived_int16, Int16Array);
+assertSame(ab, derived_int16.buffer);
+assertEquals(6, derived_int16.length);
+assertEquals(12, derived_int16.byteLength);
+assertEquals(0, derived_int16.byteOffset);
+assertEquals(2, derived_int16.BYTES_PER_ELEMENT);
+var derived_int16_2 = new Int16Array(ab,6);
+assertInstance(derived_int16_2, Int16Array);
+assertSame(ab, derived_int16_2.buffer);
+assertEquals(3, derived_int16_2.length);
+assertEquals(6, derived_int16_2.byteLength);
+assertEquals(6, derived_int16_2.byteOffset);
+assertEquals(2, derived_int16_2.BYTES_PER_ELEMENT);
+var derived_uint32 = new Uint32Array(ab);
+assertInstance(derived_uint32, Uint32Array);
+assertSame(ab, derived_uint32.buffer);
+assertEquals(3, derived_uint32.length);
+assertEquals(12, derived_uint32.byteLength);
+assertEquals(0, derived_uint32.byteOffset);
+assertEquals(4, derived_uint32.BYTES_PER_ELEMENT);
+var derived_uint32_2 = new Uint32Array(ab,4);
+assertInstance(derived_uint32_2, Uint32Array);
+assertSame(ab, derived_uint32_2.buffer);
+assertEquals(2, derived_uint32_2.length);
+assertEquals(8, derived_uint32_2.byteLength);
+assertEquals(4, derived_uint32_2.byteOffset);
+assertEquals(4, derived_uint32_2.BYTES_PER_ELEMENT);
+var derived_uint32_3 = new Uint32Array(ab,4,1);
+assertInstance(derived_uint32_3, Uint32Array);
+assertSame(ab, derived_uint32_3.buffer);
+assertEquals(1, derived_uint32_3.length);
+assertEquals(4, derived_uint32_3.byteLength);
+assertEquals(4, derived_uint32_3.byteOffset);
+assertEquals(4, derived_uint32_3.BYTES_PER_ELEMENT);
+var derived_float64 = new Float64Array(ab,0,1);
+assertInstance(derived_float64, Float64Array);
+assertSame(ab, derived_float64.buffer);
+assertEquals(1, derived_float64.length);
+assertEquals(8, derived_float64.byteLength);
+assertEquals(0, derived_float64.byteOffset);
+assertEquals(8, derived_float64.BYTES_PER_ELEMENT);
+
+// If a given byteOffset and length reference an area beyond the end of the
+// ArrayBuffer, an exception is raised.
+function abfunc3() {
+ new Uint32Array(ab,4,3);
+}
+assertThrows(abfunc3);
+function abfunc4() {
+ new Uint32Array(ab,16);
+}
+assertThrows(abfunc4);
+
+// The given byteOffset must be a multiple of the element size of the specific
+// type, otherwise an exception is raised.
+function abfunc5() {
+ new Uint32Array(ab,5);
+}
+assertThrows(abfunc5);
+
+// If length is not explicitly specified, the length of the ArrayBuffer minus
+// the byteOffset must be a multiple of the element size of the specific type,
+// or an exception is raised.
+var ab2 = new ArrayBuffer(13);
+function abfunc6() {
+ new Uint32Array(ab2,4);
+}
+assertThrows(abfunc6);
+
+// Test that an array constructed without an array buffer creates one properly.
+a = new Uint8Array(31);
+assertEquals(a.byteLength, a.buffer.byteLength);
+assertEquals(a.length, a.buffer.byteLength);
+assertEquals(a.length * a.BYTES_PER_ELEMENT, a.buffer.byteLength);
+a = new Int16Array(5);
+assertEquals(a.byteLength, a.buffer.byteLength);
+assertEquals(a.length * a.BYTES_PER_ELEMENT, a.buffer.byteLength);
+a = new Float64Array(7);
+assertEquals(a.byteLength, a.buffer.byteLength);
+assertEquals(a.length * a.BYTES_PER_ELEMENT, a.buffer.byteLength);
+
+// Test that an implicitly created buffer is a valid buffer.
+a = new Float64Array(7);
+assertSame(a.buffer, (new Uint16Array(a.buffer)).buffer);
+assertSame(a.buffer, (new Float32Array(a.buffer,4)).buffer);
+assertSame(a.buffer, (new Int8Array(a.buffer,3,51)).buffer);
+assertInstance(a.buffer, ArrayBuffer);
+
+// Test the correct behavior of the |BYTES_PER_ELEMENT| property (which is
+// "constant", but not read-only).
+a = new Int32Array(2);
+assertEquals(4, a.BYTES_PER_ELEMENT);
+a.BYTES_PER_ELEMENT = 42;
+assertEquals(42, a.BYTES_PER_ELEMENT);
+a = new Uint8Array(2);
+assertEquals(1, a.BYTES_PER_ELEMENT);
+a = new Int16Array(2);
+assertEquals(2, a.BYTES_PER_ELEMENT);
+
+// Test Float64Arrays.
+function get(a, index) {
+ return a[index];
+}
+function set(a, index, value) {
+ a[index] = value;
+}
+function temp() {
+var array = new Float64Array(2);
+for (var i = 0; i < 5; i++) {
+ set(array, 0, 2.5);
+ assertEquals(2.5, array[0]);
+}
+%OptimizeFunctionOnNextCall(set);
+set(array, 0, 2.5);
+assertEquals(2.5, array[0]);
+set(array, 1, 3.5);
+assertEquals(3.5, array[1]);
+for (var i = 0; i < 5; i++) {
+ assertEquals(2.5, get(array, 0));
+ assertEquals(3.5, array[1]);
+}
+%OptimizeFunctionOnNextCall(get);
+assertEquals(2.5, get(array, 0));
+assertEquals(3.5, get(array, 1));
+}
+
+// Test non-number parameters.
+var array_with_length_from_non_number = new Int32Array("2");
+assertEquals(2, array_with_length_from_non_number.length);
+array_with_length_from_non_number = new Int32Array(undefined);
+assertEquals(0, array_with_length_from_non_number.length);
+var foo = { valueOf: function() { return 3; } };
+array_with_length_from_non_number = new Int32Array(foo);
+assertEquals(3, array_with_length_from_non_number.length);
+foo = { toString: function() { return "4"; } };
+array_with_length_from_non_number = new Int32Array(foo);
+assertEquals(4, array_with_length_from_non_number.length);
+
+
+// Test loads and stores.
+types = [Array, Int8Array, Uint8Array, Int16Array, Uint16Array, Int32Array,
+ Uint32Array, Uint8ClampedArray, Float32Array, Float64Array];
+
+test_result_nan = [NaN, 0, 0, 0, 0, 0, 0, 0, NaN, NaN];
+test_result_low_int = [-1, -1, 255, -1, 65535, -1, 0xFFFFFFFF, 0, -1, -1];
+test_result_low_double = [-1.25, -1, 255, -1, 65535, -1, 0xFFFFFFFF, 0, -1.25, -1.25];
+test_result_middle = [253.75, -3, 253, 253, 253, 253, 253, 254, 253.75, 253.75];
+test_result_high_int = [256, 0, 0, 256, 256, 256, 256, 255, 256, 256];
+test_result_high_double = [256.25, 0, 0, 256, 256, 256, 256, 255, 256.25, 256.25];
+
+const kElementCount = 40;
+
+function test_load(array, sum) {
+ for (var i = 0; i < kElementCount; i++) {
+ sum += array[i];
+ }
+ return sum;
+}
+
+function test_load_const_key(array, sum) {
+ sum += array[0];
+ sum += array[1];
+ sum += array[2];
+ return sum;
+}
+
+function test_store(array, sum) {
+ for (var i = 0; i < kElementCount; i++) {
+ sum += array[i] = i+1;
+ }
+ return sum;
+}
+
+function test_store_const_key(array, sum) {
+ sum += array[0] = 1;
+ sum += array[1] = 2;
+ sum += array[2] = 3;
+ return sum;
+}
+
+function zero() {
+ return 0.0;
+}
+
+function test_store_middle_tagged(array, sum) {
+ array[0] = 253.75;
+ return array[0];
+}
+
+function test_store_high_tagged(array, sum) {
+ array[0] = 256.25;
+ return array[0];
+}
+
+function test_store_middle_double(array, sum) {
+ array[0] = 253.75 + zero(); // + forces double type feedback
+ return array[0];
+}
+
+function test_store_high_double(array, sum) {
+ array[0] = 256.25 + zero(); // + forces double type feedback
+ return array[0];
+}
+
+function test_store_high_double(array, sum) {
+ array[0] = 256.25;
+ return array[0];
+}
+
+function test_store_low_int(array, sum) {
+ array[0] = -1;
+ return array[0];
+}
+
+function test_store_low_tagged(array, sum) {
+ array[0] = -1.25;
+ return array[0];
+}
+
+function test_store_low_double(array, sum) {
+ array[0] = -1.25 + zero(); // + forces double type feedback
+ return array[0];
+}
+
+function test_store_high_int(array, sum) {
+ array[0] = 256;
+ return array[0];
+}
+
+function test_store_nan(array, sum) {
+ array[0] = NaN;
+ return array[0];
+}
+
+const kRuns = 10;
+
+function run_test(test_func, array, expected_result) {
+ for (var i = 0; i < 5; i++) test_func(array, 0);
+ %OptimizeFunctionOnNextCall(test_func);
+ var sum = 0;
+ for (var i = 0; i < kRuns; i++) {
+ sum = test_func(array, sum);
+ }
+ assertEquals(expected_result, sum);
+ %DeoptimizeFunction(test_func);
+ gc(); // Makes V8 forget about type information for test_func.
+}
+
+function run_bounds_test(test_func, array, expected_result) {
+ assertEquals(undefined, a[kElementCount]);
+ a[kElementCount] = 456;
+ assertEquals(undefined, a[kElementCount]);
+ assertEquals(undefined, a[kElementCount+1]);
+ a[kElementCount+1] = 456;
+ assertEquals(undefined, a[kElementCount+1]);
+}
+
+for (var t = 0; t < types.length; t++) {
+ var type = types[t];
+ var a = new type(kElementCount);
+
+ for (var i = 0; i < kElementCount; i++) {
+ a[i] = i;
+ }
+
+ // Run test functions defined above.
+ run_test(test_load, a, 780 * kRuns);
+ run_test(test_load_const_key, a, 3 * kRuns);
+ run_test(test_store, a, 820 * kRuns);
+ run_test(test_store_const_key, a, 6 * kRuns);
+ run_test(test_store_low_int, a, test_result_low_int[t]);
+ run_test(test_store_low_double, a, test_result_low_double[t]);
+ run_test(test_store_low_tagged, a, test_result_low_double[t]);
+ run_test(test_store_high_int, a, test_result_high_int[t]);
+ run_test(test_store_nan, a, test_result_nan[t]);
+ run_test(test_store_middle_double, a, test_result_middle[t]);
+ run_test(test_store_middle_tagged, a, test_result_middle[t]);
+ run_test(test_store_high_double, a, test_result_high_double[t]);
+ run_test(test_store_high_tagged, a, test_result_high_double[t]);
+
+ // Test the correct behavior of the |length| property (which is read-only).
+ if (t != 0) {
+ assertEquals(kElementCount, a.length);
+ a.length = 2;
+ assertEquals(kElementCount, a.length);
+ assertTrue(delete a.length);
+ a.length = 2;
+ assertEquals(2, a.length);
+
+ // Make sure bounds checks are handled correctly for external arrays.
+ run_bounds_test(a);
+ run_bounds_test(a);
+ run_bounds_test(a);
+ %OptimizeFunctionOnNextCall(run_bounds_test);
+ run_bounds_test(a);
+ %DeoptimizeFunction(run_bounds_test);
+ gc(); // Makes V8 forget about type information for test_func.
+
+ }
+
+ function array_load_set_smi_check(a) {
+ return a[0] = a[0] = 1;
+ }
+
+ array_load_set_smi_check(a);
+ array_load_set_smi_check(0);
+
+ function array_load_set_smi_check2(a) {
+ return a[0] = a[0] = 1;
+ }
+
+ array_load_set_smi_check2(a);
+ %OptimizeFunctionOnNextCall(array_load_set_smi_check2);
+ array_load_set_smi_check2(a);
+ array_load_set_smi_check2(0);
+ %DeoptimizeFunction(array_load_set_smi_check2);
+ gc(); // Makes V8 forget about type information for array_load_set_smi_check.
+}
+
+// Check handling of undefined in 32- and 64-bit external float arrays.
+
+function store_float32_undefined(ext_array) {
+ ext_array[0] = undefined;
+}
+
+var float32_array = new Float32Array(1);
+// Make sure runtime does it right
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+// Make sure the ICs do it right
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+// Make sure that Crankshaft does it right.
+%OptimizeFunctionOnNextCall(store_float32_undefined);
+store_float32_undefined(float32_array);
+assertTrue(isNaN(float32_array[0]));
+
+function store_float64_undefined(ext_array) {
+ ext_array[0] = undefined;
+}
+
+var float64_array = new Float64Array(1);
+// Make sure runtime does it right
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
+// Make sure the ICs do it right
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
+// Make sure that Crankshaft does it right.
+%OptimizeFunctionOnNextCall(store_float64_undefined);
+store_float64_undefined(float64_array);
+assertTrue(isNaN(float64_array[0]));
+
+
+// Check handling of 0-sized buffers and arrays.
+ab = new ArrayBuffer(0);
+assertInstance(ab, ArrayBuffer);
+assertEquals(0, ab.byteLength);
+a = new Int8Array(ab);
+assertInstance(a, Int8Array);
+assertEquals(0, a.byteLength);
+assertEquals(0, a.length);
+a[0] = 1;
+assertEquals(undefined, a[0]);
+ab = new ArrayBuffer(16);
+assertInstance(ab, ArrayBuffer);
+a = new Float32Array(ab,4,0);
+assertInstance(a, Float32Array);
+assertEquals(0, a.byteLength);
+assertEquals(0, a.length);
+a[0] = 1;
+assertEquals(undefined, a[0]);
+a = new Uint16Array(0);
+assertInstance(a, Uint16Array);
+assertEquals(0, a.byteLength);
+assertEquals(0, a.length);
+a[0] = 1;
+assertEquals(undefined, a[0]);
+
+
+// Check construction from arrays.
+a = new Uint32Array([]);
+assertInstance(a, Uint32Array);
+assertEquals(0, a.length);
+assertEquals(0, a.byteLength);
+assertEquals(0, a.buffer.byteLength);
+assertEquals(4, a.BYTES_PER_ELEMENT);
+assertInstance(a.buffer, ArrayBuffer);
+a = new Uint16Array([1,2,3]);
+assertInstance(a, Uint16Array);
+assertEquals(3, a.length);
+assertEquals(6, a.byteLength);
+assertEquals(6, a.buffer.byteLength);
+assertEquals(2, a.BYTES_PER_ELEMENT);
+assertEquals(1, a[0]);
+assertEquals(3, a[2]);
+assertInstance(a.buffer, ArrayBuffer);
+a = new Uint32Array(a);
+assertInstance(a, Uint32Array);
+assertEquals(3, a.length);
+assertEquals(12, a.byteLength);
+assertEquals(12, a.buffer.byteLength);
+assertEquals(4, a.BYTES_PER_ELEMENT);
+assertEquals(1, a[0]);
+assertEquals(3, a[2]);
+assertInstance(a.buffer, ArrayBuffer);
+
+// Check subarrays.
+a = new Uint16Array([1,2,3,4,5,6]);
+aa = a.subarray(3);
+assertInstance(aa, Uint16Array);
+assertEquals(3, aa.length);
+assertEquals(6, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(3,5);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(4,8);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(9);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-4);
+assertInstance(aa, Uint16Array);
+assertEquals(4, aa.length);
+assertEquals(8, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-3,-1);
+assertInstance(aa, Uint16Array);
+assertEquals(2, aa.length);
+assertEquals(4, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(3,2);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(-3,-4);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+aa = a.subarray(0,-8);
+assertInstance(aa, Uint16Array);
+assertEquals(0, aa.length);
+assertEquals(0, aa.byteLength);
+assertEquals(2, aa.BYTES_PER_ELEMENT);
+assertSame(a.buffer, aa.buffer);
+
+assertThrows(function(){ a.subarray.call({}, 0) });
+assertThrows(function(){ a.subarray.call([], 0) });
+assertThrows(function(){ a.subarray.call(a) });
+
+
+// Call constructors directly as functions, and through .call and .apply
+
+b = ArrayBuffer(100)
+a = Int8Array(b, 5, 77)
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Int8Array)
+assertSame(b, a.buffer)
+assertEquals(5, a.byteOffset)
+assertEquals(77, a.byteLength)
+b = ArrayBuffer.call(null, 10)
+a = Uint16Array.call(null, b, 2, 4)
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Uint16Array)
+assertSame(b, a.buffer)
+assertEquals(2, a.byteOffset)
+assertEquals(8, a.byteLength)
+b = ArrayBuffer.apply(null, [1000])
+a = Float32Array.apply(null, [b, 128, 1])
+assertInstance(b, ArrayBuffer)
+assertInstance(a, Float32Array)
+assertSame(b, a.buffer)
+assertEquals(128, a.byteOffset)
+assertEquals(4, a.byteLength)
+
+
+// Test array.set in different combinations.
+
+function assertArrayPrefix(expected, array) {
+ for (var i = 0; i < expected.length; ++i) {
+ assertEquals(expected[i], array[i]);
+ }
+}
+
+var a11 = new Int16Array([1, 2, 3, 4, 0, -1])
+var a12 = new Uint16Array(15)
+a12.set(a11, 3)
+assertArrayPrefix([0, 0, 0, 1, 2, 3, 4, 0, 0xffff, 0, 0], a12)
+assertThrows(function(){ a11.set(a12) })
+
+var a21 = [1, undefined, 10, NaN, 0, -1, {valueOf: function() {return 3}}]
+var a22 = new Int32Array(12)
+a22.set(a21, 2)
+assertArrayPrefix([0, 0, 1, 0, 10, 0, 0, -1, 3, 0], a22)
+
+var a31 = new Float32Array([2, 4, 6, 8, 11, NaN, 1/0, -3])
+var a32 = a31.subarray(2, 6)
+a31.set(a32, 4)
+assertArrayPrefix([2, 4, 6, 8, 6, 8, 11, NaN], a31)
+assertArrayPrefix([6, 8, 6, 8], a32)
+
+var a4 = new Uint8ClampedArray([3,2,5,6])
+a4.set(a4)
+assertArrayPrefix([3, 2, 5, 6], a4)
+
+// Cases with overlapping backing store but different element sizes.
+var b = new ArrayBuffer(4)
+var a5 = new Int16Array(b)
+var a50 = new Int8Array(b)
+var a51 = new Int8Array(b, 0, 2)
+var a52 = new Int8Array(b, 1, 2)
+var a53 = new Int8Array(b, 2, 2)
+
+a5.set([0x5050, 0x0a0a])
+assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+assertArrayPrefix([0x50, 0x50], a51)
+assertArrayPrefix([0x50, 0x0a], a52)
+assertArrayPrefix([0x0a, 0x0a], a53)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a51.set(a5)
+assertArrayPrefix([0x50, 0x0a, 0x0a, 0x0a], a50)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a52.set(a5)
+assertArrayPrefix([0x50, 0x50, 0x0a, 0x0a], a50)
+
+a50.set([0x50, 0x50, 0x0a, 0x0a])
+a53.set(a5)
+assertArrayPrefix([0x50, 0x50, 0x50, 0x0a], a50)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a51)
+assertArrayPrefix([0x0050, 0x0051], a5)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a52)
+assertArrayPrefix([0x0051, 0x000a], a5)
+
+a50.set([0x50, 0x51, 0x0a, 0x0b])
+a5.set(a53)
+assertArrayPrefix([0x000a, 0x000b], a5)
+
+// Mixed types of same size.
+var a61 = new Float32Array([1.2, 12.3])
+var a62 = new Int32Array(2)
+a62.set(a61)
+assertArrayPrefix([1, 12], a62)
+a61.set(a62)
+assertArrayPrefix([1, 12], a61)
+
+// Invalid source
+assertThrows(function() { a.set(0) })
+assertThrows(function() { a.set({}) })
+
+
+// Test arraybuffer.slice
+
+var a0 = new Int8Array([1, 2, 3, 4, 5, 6])
+var b0 = a0.buffer
+
+var b1 = b0.slice(0)
+assertEquals(b0.byteLength, b1.byteLength)
+assertArrayPrefix([1, 2, 3, 4, 5, 6], Int8Array(b1))
+
+var b2 = b0.slice(3)
+assertEquals(b0.byteLength - 3, b2.byteLength)
+assertArrayPrefix([4, 5, 6], Int8Array(b2))
+
+var b3 = b0.slice(2, 4)
+assertEquals(2, b3.byteLength)
+assertArrayPrefix([3, 4], Int8Array(b3))
+
+function goo(a, i) {
+ return a[i];
+}
+
+function boo(a, i, v) {
+ return a[i] = v;
+}
+
+function do_tagged_index_external_array_test(constructor) {
+ var t_array = new constructor([1, 2, 3, 4, 5, 6]);
+ assertEquals(1, goo(t_array, 0));
+ assertEquals(1, goo(t_array, 0));
+ boo(t_array, 0, 13);
+ assertEquals(13, goo(t_array, 0));
+ %OptimizeFunctionOnNextCall(goo);
+ %OptimizeFunctionOnNextCall(boo);
+ boo(t_array, 0, 15);
+ assertEquals(15, goo(t_array, 0));
+ %ClearFunctionTypeFeedback(goo);
+ %ClearFunctionTypeFeedback(boo);
+}
+
+do_tagged_index_external_array_test(Int8Array);
+do_tagged_index_external_array_test(Uint8Array);
+do_tagged_index_external_array_test(Int16Array);
+do_tagged_index_external_array_test(Uint16Array);
+do_tagged_index_external_array_test(Int32Array);
+do_tagged_index_external_array_test(Uint32Array);
+do_tagged_index_external_array_test(Float32Array);
+do_tagged_index_external_array_test(Float64Array);
+
+var built_in_array = new Array(1, 2, 3, 4, 5, 6);
+assertEquals(1, goo(built_in_array, 0));
+assertEquals(1, goo(built_in_array, 0));
+%OptimizeFunctionOnNextCall(goo);
+%OptimizeFunctionOnNextCall(boo);
+boo(built_in_array, 0, 11);
+assertEquals(11, goo(built_in_array, 0));
+%ClearFunctionTypeFeedback(goo);
+%ClearFunctionTypeFeedback(boo);
+
+built_in_array = new Array(1.5, 2, 3, 4, 5, 6);
+assertEquals(1.5, goo(built_in_array, 0));
+assertEquals(1.5, goo(built_in_array, 0));
+%OptimizeFunctionOnNextCall(goo);
+%OptimizeFunctionOnNextCall(boo);
+boo(built_in_array, 0, 2.5);
+assertEquals(2.5, goo(built_in_array, 0));
+%ClearFunctionTypeFeedback(goo);
+%ClearFunctionTypeFeedback(boo);
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index 0219f3936..d60c59c90 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -25,7 +25,7 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// Flags: --harmony-collections --expose-gc
+// Flags: --harmony-collections --expose-gc --allow-natives-syntax
// Test valid getter and setter calls on Sets.
@@ -254,6 +254,27 @@ assertTrue(WeakMap.prototype.has instanceof Function)
assertTrue(WeakMap.prototype.delete instanceof Function)
+// Test class of the Set, Map and WeakMap instance and prototype.
+assertEquals("Set", %_ClassOf(new Set))
+assertEquals("Object", %_ClassOf(Set.prototype))
+assertEquals("Map", %_ClassOf(new Map))
+assertEquals("Object", %_ClassOf(Map.prototype))
+assertEquals("WeakMap", %_ClassOf(new WeakMap))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+
+
+// Test constructor property of the Set, Map and WeakMap prototype.
+function TestConstructor(C) {
+ assertFalse(C === Object.prototype.constructor);
+ assertSame(C, C.prototype.constructor);
+ assertSame(C, C().__proto__.constructor);
+ assertSame(C, (new C).__proto__.constructor);
+}
+TestConstructor(Set);
+TestConstructor(Map);
+TestConstructor(WeakMap);
+
+
// Regression test for WeakMap prototype.
assertTrue(WeakMap.prototype.constructor === WeakMap)
assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
diff --git a/deps/v8/test/mjsunit/harmony/generators-objects.js b/deps/v8/test/mjsunit/harmony/generators-objects.js
new file mode 100644
index 000000000..0c36818c8
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/generators-objects.js
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-generators --harmony-scoping
+
+// Test instantiations of generators.
+
+// Generators shouldn't allocate stack slots. This test will abort in debug
+// mode if generators have stack slots.
+function TestContextAllocation() {
+ function* g1(a, b, c) { yield 1; return [a, b, c]; }
+ function* g2() { yield 1; return arguments; }
+ function* g3() { yield 1; return this; }
+ function* g4() { var x = 10; yield 1; return x; }
+ // Temporary variable context allocation
+ function* g5(l) { "use strict"; yield 1; for (let x in l) { yield x; } }
+
+ g1();
+ g2();
+ g3();
+ g4();
+ g5(["foo"]);
+}
+TestContextAllocation();
+
+
+// Test the properties and prototype of a generator object.
+function TestGeneratorObject() {
+ function* g() { yield 1; }
+
+ var iter = g();
+ assertSame(g.prototype, Object.getPrototypeOf(iter));
+ assertTrue(iter instanceof g);
+ assertEquals([], Object.getOwnPropertyNames(iter));
+ assertTrue(iter !== g());
+
+ // g() is the same as new g().
+ iter = new g();
+ assertSame(g.prototype, Object.getPrototypeOf(iter));
+ assertTrue(iter instanceof g);
+ assertEquals([], Object.getOwnPropertyNames(iter));
+ assertTrue(iter !== new g());
+}
+TestGeneratorObject();
diff --git a/deps/v8/test/mjsunit/harmony/generators-parsing.js b/deps/v8/test/mjsunit/harmony/generators-parsing.js
index 0e5494df1..49a44ba32 100644
--- a/deps/v8/test/mjsunit/harmony/generators-parsing.js
+++ b/deps/v8/test/mjsunit/harmony/generators-parsing.js
@@ -74,7 +74,7 @@ function* g() { yield ({ get yield() { return 1; }}) }
// mode or in generators.
function f() { yield: 1 }
assertThrows("function f() { \"use strict\"; yield: 1 }", SyntaxError)
-assertThrows("function f*() { yield: 1 }", SyntaxError)
+assertThrows("function* g() { yield: 1 }", SyntaxError)
// Yield is only a keyword in the body of the generator, not in nested
// functions.
diff --git a/deps/v8/test/mjsunit/harmony/generators-runtime.js b/deps/v8/test/mjsunit/harmony/generators-runtime.js
new file mode 100644
index 000000000..d28140c19
--- /dev/null
+++ b/deps/v8/test/mjsunit/harmony/generators-runtime.js
@@ -0,0 +1,126 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-generators
+
+// Test aspects of the generator runtime.
+
+// FIXME(wingo): Replace this reference with a more official link.
+// See:
+// http://wiki.ecmascript.org/lib/exe/fetch.php?cache=cache&media=harmony:es6_generator_object_model_3-29-13.png
+
+function f() { }
+function* g() { yield 1; }
+var GeneratorFunctionPrototype = Object.getPrototypeOf(g);
+var GeneratorFunction = GeneratorFunctionPrototype.constructor;
+var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
+
+// A generator function should have the same set of properties as any
+// other function.
+function TestGeneratorFunctionInstance() {
+ var f_own_property_names = Object.getOwnPropertyNames(f);
+ var g_own_property_names = Object.getOwnPropertyNames(g);
+
+ f_own_property_names.sort();
+ g_own_property_names.sort();
+
+ assertArrayEquals(f_own_property_names, g_own_property_names);
+ var i;
+ for (i = 0; i < f_own_property_names.length; i++) {
+ var prop = f_own_property_names[i];
+ var f_desc = Object.getOwnPropertyDescriptor(f, prop);
+ var g_desc = Object.getOwnPropertyDescriptor(g, prop);
+ assertEquals(f_desc.configurable, g_desc.configurable, prop);
+ assertEquals(f_desc.writable, g_desc.writable, prop);
+ assertEquals(f_desc.enumerable, g_desc.enumerable, prop);
+ }
+}
+TestGeneratorFunctionInstance();
+
+
+// Generators have an additional object interposed in the chain between
+// themselves and Function.prototype.
+function TestGeneratorFunctionPrototype() {
+ // Sanity check.
+ assertSame(Object.getPrototypeOf(f), Function.prototype);
+ assertFalse(GeneratorFunctionPrototype === Function.prototype);
+ assertSame(Function.prototype,
+ Object.getPrototypeOf(GeneratorFunctionPrototype));
+ assertSame(GeneratorFunctionPrototype,
+ Object.getPrototypeOf(function* () {}));
+}
+TestGeneratorFunctionPrototype();
+
+
+// Functions that we associate with generator objects are actually defined by
+// a common prototype.
+function TestGeneratorObjectPrototype() {
+ assertSame(Object.prototype,
+ Object.getPrototypeOf(GeneratorObjectPrototype));
+ assertSame(GeneratorObjectPrototype,
+ Object.getPrototypeOf((function*(){yield 1}).prototype));
+
+ var expected_property_names = ["next", "send", "throw", "close",
+ "constructor"];
+ var found_property_names =
+ Object.getOwnPropertyNames(GeneratorObjectPrototype);
+
+ expected_property_names.sort();
+ found_property_names.sort();
+
+ assertArrayEquals(expected_property_names, found_property_names);
+}
+TestGeneratorObjectPrototype();
+
+
+// This tests the object that would be called "GeneratorFunction", if it were
+// like "Function".
+function TestGeneratorFunction() {
+ assertSame(GeneratorFunctionPrototype, GeneratorFunction.prototype);
+ assertTrue(g instanceof GeneratorFunction);
+
+ assertSame(Function, Object.getPrototypeOf(GeneratorFunction));
+ assertTrue(g instanceof Function);
+
+ // Not all functions are generators.
+ assertTrue(f instanceof Function); // Sanity check.
+ assertTrue(!(f instanceof GeneratorFunction));
+}
+TestGeneratorFunction();
+
+
+function TestPerGeneratorPrototype() {
+ assertTrue((function*(){}).prototype !== (function*(){}).prototype);
+ assertTrue((function*(){}).prototype !== g.prototype);
+ assertTrue(g.prototype instanceof GeneratorFunctionPrototype);
+ assertSame(GeneratorObjectPrototype, Object.getPrototypeOf(g.prototype));
+ assertTrue(!(g.prototype instanceof Function));
+ assertSame(typeof (g.prototype), "object");
+
+ assertArrayEquals([], Object.getOwnPropertyNames(g.prototype));
+}
+TestPerGeneratorPrototype();
diff --git a/deps/v8/test/mjsunit/harmony/symbols.js b/deps/v8/test/mjsunit/harmony/symbols.js
index a3f6e5720..5eaa1a37d 100644
--- a/deps/v8/test/mjsunit/harmony/symbols.js
+++ b/deps/v8/test/mjsunit/harmony/symbols.js
@@ -82,6 +82,23 @@ function TestPrototype() {
TestPrototype()
+function TestConstructor() {
+ assertFalse(Object === Symbol.prototype.constructor)
+ assertFalse(Symbol === Object.prototype.constructor)
+ assertSame(Symbol, Symbol.prototype.constructor)
+ assertSame(Symbol, Symbol().__proto__.constructor)
+ assertSame(Symbol, Symbol(Symbol()).__proto__.constructor)
+ assertSame(Symbol, (new Symbol).__proto__.constructor)
+ assertSame(Symbol, (new Symbol()).__proto__.constructor)
+ assertSame(Symbol, (new Symbol(Symbol())).__proto__.constructor)
+ assertSame(Symbol, Object(Symbol()).__proto__.constructor)
+ for (var i in symbols) {
+ assertSame(Symbol, symbols[i].__proto__.constructor)
+ }
+}
+TestConstructor()
+
+
function TestName() {
for (var i in symbols) {
var name = symbols[i].name
diff --git a/deps/v8/test/mjsunit/harmony/typedarrays.js b/deps/v8/test/mjsunit/harmony/typedarrays.js
index 9b01ba60e..75ff3da42 100644
--- a/deps/v8/test/mjsunit/harmony/typedarrays.js
+++ b/deps/v8/test/mjsunit/harmony/typedarrays.js
@@ -27,6 +27,8 @@
// Flags: --harmony-typed-arrays
+// ArrayBuffer
+
function TestByteLength(param, expectedByteLength) {
var ab = new __ArrayBuffer(param);
assertSame(expectedByteLength, ab.byteLength);
@@ -104,8 +106,98 @@ function TestArrayBufferSlice() {
TestArrayBufferSlice();
+// Typed arrays
+
+function TestTypedArray(proto, elementSize, typicalElement) {
+ var ab = new __ArrayBuffer(256*elementSize);
+
+ var a1 = new proto(ab, 128*elementSize, 128);
+ assertSame(ab, a1.buffer);
+ assertSame(elementSize, a1.BYTES_PER_ELEMENT);
+ assertSame(128, a1.length);
+ assertSame(128*elementSize, a1.byteLength);
+ assertSame(128*elementSize, a1.byteOffset);
+
+
+ var a2 = new proto(ab, 64*elementSize, 128);
+ assertSame(ab, a2.buffer);
+ assertSame(elementSize, a2.BYTES_PER_ELEMENT);
+ assertSame(128, a2.length);
+ assertSame(128*elementSize, a2.byteLength);
+ assertSame(64*elementSize, a2.byteOffset);
+
+ var a3 = new proto(ab, 192*elementSize);
+ assertSame(ab, a3.buffer);
+ assertSame(64, a3.length);
+ assertSame(64*elementSize, a3.byteLength);
+ assertSame(192*elementSize, a3.byteOffset);
+
+ var a4 = new proto(ab);
+ assertSame(ab, a4.buffer);
+ assertSame(256, a4.length);
+ assertSame(256*elementSize, a4.byteLength);
+ assertSame(0, a4.byteOffset);
+
+
+ var i;
+ for (i = 0; i < 128; i++) {
+ a1[i] = typicalElement;
+ }
+
+ for (i = 0; i < 128; i++) {
+ assertSame(typicalElement, a1[i]);
+ }
+
+ for (i = 0; i < 64; i++) {
+ assertSame(0, a2[i]);
+ }
+
+ for (i = 64; i < 128; i++) {
+ assertSame(typicalElement, a2[i]);
+ }
+
+ for (i = 0; i < 64; i++) {
+ assertSame(typicalElement, a3[i]);
+ }
+
+ for (i = 0; i < 128; i++) {
+ assertSame(0, a4[i]);
+ }
+
+ for (i = 128; i < 256; i++) {
+ assertSame(typicalElement, a4[i]);
+ }
+
+ assertThrows(function () { new proto(ab, 256*elementSize); }, RangeError);
+
+ if (elementSize !== 1) {
+ assertThrows(function() { new proto(ab, 128*elementSize - 1, 10); },
+ RangeError);
+ var unalignedArrayBuffer = new __ArrayBuffer(10*elementSize + 1);
+ var goodArray = new proto(unalignedArrayBuffer, 0, 10);
+ assertSame(10, goodArray.length);
+ assertSame(10*elementSize, goodArray.byteLength);
+ assertThrows(function() { new proto(unalignedArrayBuffer)}, RangeError);
+ assertThrows(function() { new proto(unalignedArrayBuffer, 5*elementSize)},
+ RangeError);
+ }
+
+}
+
+TestTypedArray(__Uint8Array, 1, 0xFF);
+TestTypedArray(__Int8Array, 1, -0x7F);
+TestTypedArray(__Uint16Array, 2, 0xFFFF);
+TestTypedArray(__Int16Array, 2, -0x7FFF);
+TestTypedArray(__Uint32Array, 4, 0xFFFFFFFF);
+TestTypedArray(__Int32Array, 4, -0x7FFFFFFF);
+TestTypedArray(__Float32Array, 4, 0.5);
+TestTypedArray(__Float64Array, 8, 0.5);
+
+
+// General tests for properties
+
// Test property attribute [[Enumerable]]
-function TestEnumerable(func) {
+function TestEnumerable(func, obj) {
function props(x) {
var array = [];
for (var p in x) array.push(p);
@@ -113,9 +205,11 @@ function TestEnumerable(func) {
}
assertArrayEquals([], props(func));
assertArrayEquals([], props(func.prototype));
- assertArrayEquals([], props(new func()));
+ if (obj)
+ assertArrayEquals([], props(obj));
}
-TestEnumerable(__ArrayBuffer);
+TestEnumerable(__ArrayBuffer, new __ArrayBuffer());
+TestEnumerable(__Uint8Array);
// Test arbitrary properties on ArrayBuffer
@@ -131,6 +225,5 @@ function TestArbitrary(m) {
}
TestArbitrary(new __ArrayBuffer(256));
-
// Test direct constructor call
assertTrue(__ArrayBuffer() instanceof __ArrayBuffer);
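
The TestTypedArray cases above exercise the relationship between a view's byteOffset, byteLength and length. A minimal sketch of that relationship, written against the standard constructors rather than the __-prefixed experimental bindings the test uses, with console.log assumed for output:

// Sketch only: byteOffset/byteLength/length of a typed-array view.
var buf = new ArrayBuffer(16);
var view = new Int32Array(buf, 4);   // view starts 4 bytes into the buffer
console.log(view.byteOffset);        // 4
console.log(view.byteLength);        // 12 (buffer size minus byteOffset)
console.log(view.length);            // 3  (byteLength / BYTES_PER_ELEMENT)
console.log(view.BYTES_PER_ELEMENT); // 4
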
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 1c3602c40..09097db9f 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -44,11 +44,16 @@ regress/regress-524: SKIP
stack-traces-gc: PASS || FAIL
##############################################################################
-# Too slow in debug mode with --stress-opt
+# Too slow in debug mode with --stress-opt mode.
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
compiler/regress-funcaller: PASS, SKIP if $mode == debug
regress/regress-2318: PASS, SKIP if $mode == debug
regress/regress-create-exception: PASS, SKIP if $mode == debug
+regress/regress-2612: PASS, SKIP if $mode == debug
+
+##############################################################################
+# Too slow in debug mode for GC stress mode.
+regress/regress-crbug-217858: PASS, SKIP if $mode == debug
##############################################################################
# These use a built-in that's only present in debug mode. They take
@@ -197,3 +202,34 @@ debug-liveedit-stack-padding: SKIP
debug-liveedit-restart-frame: SKIP
debug-liveedit-double-call: SKIP
+##############################################################################
+# Native Client uses the ARM simulator so will behave similarly to arm
+# on mjsunit tests.
+# TODO(bradchen): enable more tests for NaCl V8 when it stops using
+# the ARM simulator.
+##############################################################################
+[ $arch == nacl_ia32 || $arch == nacl_x64 ]
+# There is no /tmp directory for NaCl runs
+d8-os: SKIP
+
+# Stack manipulations in LiveEdit is not implemented for this arch.
+debug-liveedit-check-stack: SKIP
+debug-liveedit-stack-padding: SKIP
+debug-liveedit-restart-frame: SKIP
+debug-liveedit-double-call: SKIP
+
+# This test dumps core for arm.debug, so no reason to expect it to work
+# for NaCl. The other three fuzz-natives tests seem to run fine.
+# As noted above none of them are run in the arm.debug case.
+fuzz-natives-part4: SKIP
+
+# Requires a bigger stack size during Genesis; if the stack size is increased,
+# the test takes too much time to run. However, the problem the test covers
+# should be platform-independent.
+regress/regress-1132: SKIP
+
+# Poor performance for NaCl V8 causes an assertion failure for this test.
+regress/regress-165637: SKIP
+
+# Skip long running test that times out in debug mode and goes OOM on NaCl.
+regress/regress-crbug-160010: SKIP
diff --git a/deps/v8/test/mjsunit/object-define-property.js b/deps/v8/test/mjsunit/object-define-property.js
index 970a80334..835d0e0a5 100644
--- a/deps/v8/test/mjsunit/object-define-property.js
+++ b/deps/v8/test/mjsunit/object-define-property.js
@@ -918,6 +918,11 @@ assertFalse(desc.writable);
assertFalse(desc.enumerable);
assertFalse(desc.configurable);
+// Define non-array property, check that .length is unaffected.
+assertEquals(16, arr.length);
+Object.defineProperty(arr, '0x20', descElement);
+assertEquals(16, arr.length);
+
// See issue 968: http://code.google.com/p/v8/issues/detail?id=968
var o = { x : 42 };
Object.defineProperty(o, "x", { writable: false });
diff --git a/deps/v8/test/mjsunit/pixel-array-rounding.js b/deps/v8/test/mjsunit/pixel-array-rounding.js
index 0c307e62e..b7db51c2c 100755
--- a/deps/v8/test/mjsunit/pixel-array-rounding.js
+++ b/deps/v8/test/mjsunit/pixel-array-rounding.js
@@ -27,12 +27,15 @@
// Flags: --allow-natives-syntax
-var pixels = new Uint8ClampedArray(8);
+var pixels = new Uint8ClampedArray(11);
function f() {
for (var i = 0; i < 8; i++) {
pixels[i] = (i * 1.1);
}
+ pixels[8] = 255.5;
+ pixels[9] = NaN;
+ pixels[10] = -0.5;
return pixels[1] + pixels[6];
}
@@ -42,3 +45,6 @@ assertEquals(6, pixels[5]);
%OptimizeFunctionOnNextCall(f);
f();
assertEquals(6, pixels[5]);
+assertEquals(255, pixels[8]);
+assertEquals(0, pixels[9]);
+assertEquals(0, pixels[10]);
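
The new assertions check Uint8ClampedArray's store conversion: out-of-range values clamp to [0, 255], NaN stores as 0, and fractional values round to the nearest integer with ties going to the even neighbour. A short sketch of those semantics, assuming only the standard Uint8ClampedArray constructor and console.log:

// Sketch only: clamp-and-round behaviour of Uint8ClampedArray stores.
var px = new Uint8ClampedArray(5);
px[0] = 300;   // clamps to 255
px[1] = -5;    // clamps to 0
px[2] = NaN;   // stores 0
px[3] = 1.5;   // ties round to even: 2
px[4] = 2.5;   // ties round to even: 2
console.log(Array.prototype.join.call(px));  // "255,0,0,2,2"
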
diff --git a/deps/v8/test/mjsunit/proto-poison.js b/deps/v8/test/mjsunit/proto-poison.js
new file mode 100644
index 000000000..ca3b5d6d0
--- /dev/null
+++ b/deps/v8/test/mjsunit/proto-poison.js
@@ -0,0 +1,45 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that the __proto__ accessor is properly poisoned when extracted
+// from Object.prototype using the property descriptor.
+var desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertEquals("function", typeof desc.get);
+assertEquals("function", typeof desc.set);
+assertDoesNotThrow("desc.get.call({})");
+assertThrows("desc.set.call({})", TypeError);
+
+// Check that any redefinition of the __proto__ accessor causes poisoning
+// to cease and the accessor to be extracted normally.
+Object.defineProperty(Object.prototype, "__proto__", { get:function(){} });
+desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertDoesNotThrow("desc.get.call({})");
+assertThrows("desc.set.call({})", TypeError);
+Object.defineProperty(Object.prototype, "__proto__", { set:function(x){} });
+desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertDoesNotThrow("desc.get.call({})");
+assertDoesNotThrow("desc.set.call({})");
diff --git a/deps/v8/test/mjsunit/regress/readonly5.js b/deps/v8/test/mjsunit/regress/readonly5.js
new file mode 100644
index 000000000..b1499ddfc
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/readonly5.js
@@ -0,0 +1,68 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+this.__proto__ = null;
+this.x = 10;
+delete this.x;
+
+function s(v) {
+ return v.x = 1;
+}
+
+function s_strict(v) {
+ "use strict";
+ return v.x = 1;
+}
+
+function c() {
+ var o = {__proto__:this};
+ return o;
+}
+
+var o1 = c();
+var o2 = c();
+var o1_strict = c();
+var o2_strict = c();
+var o3 = c();
+var o4 = c();
+
+// Initialize the store IC.
+s(o1);
+s(o2);
+s_strict(o1_strict);
+s_strict(o2_strict);
+
+Object.defineProperty(this, "x", {writable:false, configurable:true});
+
+// Verify that directly setting x fails.
+o3.x = 1;
+assertEquals(undefined, o3.x);
+
+// Verify that setting x through the IC fails.
+assertThrows("s_strict(o4)", TypeError);
+s(o4);
+assertEquals(undefined, o4.x);
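For context, a short sketch of the language rule the store IC above has to respect: an assignment that would shadow a non-writable property found on the prototype chain fails silently in sloppy mode and throws a TypeError in strict mode (mjsunit helpers assumed; the names are illustrative):

var proto = {};
Object.defineProperty(proto, "y", { value: 1, writable: false });
var child = Object.create(proto);

child.y = 2;                           // sloppy mode: the assignment is silently ignored
assertEquals(1, child.y);
assertFalse(child.hasOwnProperty("y"));

assertThrows(function() { "use strict"; child.y = 2; }, TypeError);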
diff --git a/deps/v8/test/mjsunit/regress/regress-2273.js b/deps/v8/test/mjsunit/regress/regress-2273.js
new file mode 100644
index 000000000..7868b8da2
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2273.js
@@ -0,0 +1,103 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var CheckStringReceiver = function() {
+ "use strict";
+ // Receivers of strict functions are not coerced.
+ assertEquals("string", typeof this);
+};
+
+var CheckNumberReceiver = function() {
+ "use strict";
+ // Receivers of strict functions are not coerced.
+ assertEquals("number", typeof this);
+};
+
+var CheckUndefinedReceiver = function() {
+ "use strict";
+ // Receivers of strict functions are not coerced.
+ assertEquals("undefined", String(this));
+};
+
+var CheckNullReceiver = function() {
+ "use strict";
+ // Receivers of strict functions are not coerced.
+ assertEquals("null", String(this));
+};
+
+var CheckCoersion = function() {
+ // Receivers of non-strict functions are coerced to objects.
+ assertEquals("object", typeof this);
+};
+
+
+function strict_mode() {
+ "use strict";
+ CheckStringReceiver.call("foo");
+ CheckNumberReceiver.call(42);
+ CheckUndefinedReceiver.call(undefined);
+ CheckNullReceiver.call(null);
+ [1].forEach(CheckStringReceiver, "foo");
+ [2].every(CheckStringReceiver, "foo");
+ [3].filter(CheckStringReceiver, "foo");
+ [4].some(CheckNumberReceiver, 42);
+ [5].map(CheckNumberReceiver, 42);
+
+ CheckCoersion.call("foo");
+ CheckCoersion.call(42);
+ CheckCoersion.call(undefined);
+ CheckCoersion.call(null);
+ [1].forEach(CheckCoersion, "foo");
+ [2].every(CheckCoersion, "foo");
+ [3].filter(CheckCoersion, "foo");
+ [4].some(CheckCoersion, 42);
+ [5].map(CheckCoersion, 42);
+};
+strict_mode();
+
+function classic_mode() {
+ CheckStringReceiver.call("foo");
+ CheckNumberReceiver.call(42);
+ CheckUndefinedReceiver.call(undefined);
+ CheckNullReceiver.call(null);
+ [1].forEach(CheckStringReceiver, "foo");
+ [2].every(CheckStringReceiver, "foo");
+ [3].filter(CheckStringReceiver, "foo");
+ [4].some(CheckNumberReceiver, 42);
+ [5].map(CheckNumberReceiver, 42);
+
+ CheckCoersion.call("foo");
+ CheckCoersion.call(42);
+ CheckCoersion.call(undefined);
+ CheckCoersion.call(null);
+ [1].forEach(CheckCoersion, "foo");
+ [2].every(CheckCoersion, "foo");
+ [3].filter(CheckCoersion, "foo");
+ [4].some(CheckCoersion, 42);
+ [5].map(CheckCoersion, 42);
+};
+classic_mode();
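A compact sketch of the rule this test exercises from both callers: whether the receiver is coerced depends on the strictness of the callee, not of the calling code (mjsunit helpers assumed):

function sloppyCallee() { return this; }
function strictCallee() { "use strict"; return this; }

assertEquals("object", typeof sloppyCallee.call(42));  // primitive receiver boxed to a Number wrapper
assertEquals("number", typeof strictCallee.call(42));  // primitive receiver passed through untouched
assertSame(null, strictCallee.call(null));             // null receiver preserved
assertFalse(sloppyCallee.call(null) === null);         // sloppy: null replaced by the global object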
diff --git a/deps/v8/test/mjsunit/regress/regress-2595.js b/deps/v8/test/mjsunit/regress/regress-2595.js
new file mode 100644
index 000000000..5bb5f6d16
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2595.js
@@ -0,0 +1,57 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var p = { f: function () { return "p"; } };
+var o = Object.create(p);
+o.x = true;
+delete o.x; // slow case object
+
+var u = { x: 0, f: function () { return "u"; } }; // object with some other map
+
+function F(x) {
+ return x.f();
+}
+
+// First make the CALL IC in F go MEGAMORPHIC and ensure that we put the stub
+// that calls p.f (guarded by a negative dictionary lookup on the receiver)
+// into the stub cache.
+assertEquals("p", F(o));
+assertEquals("p", F(o));
+assertEquals("u", F(u));
+assertEquals("p", F(o));
+assertEquals("u", F(u));
+
+// Optimize F. We will inline p.f into F, guarded by a map check against the
+// receiver, which does not work for slow case objects.
+%OptimizeFunctionOnNextCall(F);
+assertEquals("p", F(o));
+
+// Add f to o. o's map will *not* change.
+o.f = function () { return "o"; };
+assertEquals("o", F(o));
diff --git a/deps/v8/test/mjsunit/regress/regress-2606.js b/deps/v8/test/mjsunit/regress/regress-2606.js
new file mode 100644
index 000000000..b704f7d1e
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2606.js
@@ -0,0 +1,61 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check baseline for __proto__.
+var desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertEquals("function", typeof desc.get);
+assertEquals("function", typeof desc.set);
+
+// Check redefining getter for __proto__.
+function replaced_get() {};
+Object.defineProperty(Object.prototype, "__proto__", { get:replaced_get });
+desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertSame(replaced_get, desc.get);
+
+// Check redefining setter for __proto__.
+function replaced_set(x) {};
+Object.defineProperty(Object.prototype, "__proto__", { set:replaced_set });
+desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertFalse(desc.enumerable);
+assertTrue(desc.configurable);
+assertSame(replaced_set, desc.set);
+
+// Check changing configurability of __proto__.
+Object.defineProperty(Object.prototype, "__proto__", { configurable:false });
+desc = Object.getOwnPropertyDescriptor(Object.prototype, "__proto__");
+assertFalse(desc.enumerable);
+assertFalse(desc.configurable);
+assertSame(replaced_get, desc.get);
+assertSame(replaced_set, desc.set);
+
+// Check freezing Object.prototype completely.
+Object.freeze(Object.prototype);
+assertTrue(Object.isFrozen(Object.prototype));
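A short sketch of what the final freeze implies for later code (fresh context, mjsunit helpers assumed): Object.prototype becomes non-extensible, so new properties can no longer be installed on it, silently in sloppy mode and with a TypeError in strict mode:

Object.freeze(Object.prototype);
assertFalse(Object.isExtensible(Object.prototype));

Object.prototype.newHelper = function() {};    // sloppy mode: silently ignored
assertFalse("newHelper" in {});

assertThrows(function() {
  "use strict";
  Object.prototype.anotherHelper = 1;          // strict mode: TypeError
}, TypeError);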
diff --git a/deps/v8/test/mjsunit/regress/regress-2612.js b/deps/v8/test/mjsunit/regress/regress-2612.js
new file mode 100644
index 000000000..06db07733
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2612.js
@@ -0,0 +1,76 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax --nodead-code-elimination
+// Flags: --nofold-constants --nouse-gvn
+
+// Create a function to get a long series of removable simulates.
+// f() {
+// var _0 = <random>, _1 = <random>, ... _1000 = <random>,
+// _1001 = <random var> + <random var>,
+// _1002 = <random var> + <random var>,
+// ...
+// _99999 = <random var> + <random var>,
+// x = 1;
+// return _0;
+// }
+
+var seed = 1;
+
+function rand() {
+ seed = seed * 171 % 1337 + 17;
+ return (seed % 1000) / 1000;
+}
+
+function randi(max) {
+ seed = seed * 131 % 1773 + 13;
+ return seed % max;
+}
+
+function varname(i) {
+ return "_" + i;
+}
+
+var source = "var ";
+
+for (var i = 0; i < 1000; i++) {
+ source += [varname(i), "=", rand(), ","].join("");
+}
+
+for (var i = 1000; i < 100000; i++) {
+ source += [varname(i), "=",
+ varname(randi(i)), "+",
+ varname(randi(i)), ","].join("");
+}
+
+source += "x=1; return _0;"
+var f = new Function(source);
+
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
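For illustration, the same construction at a tiny scale (three leaf variables, three derived ones) shows the shape of the generated body described in the comment above; it is valid source and returns the first variable:

var tinySource = "var _0=0.1,_1=0.2,_2=0.3," +
                 "_3=_1+_0,_4=_2+_3,_5=_0+_4," +
                 "x=1; return _0;";
var tiny = new Function(tinySource);
assertEquals(0.1, tiny());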
diff --git a/deps/v8/test/mjsunit/regress/regress-2618.js b/deps/v8/test/mjsunit/regress/regress-2618.js
new file mode 100644
index 000000000..638b71e62
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2618.js
@@ -0,0 +1,74 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --use-osr --allow-natives-syntax
+
+function f() {
+ do {
+ do {
+ for (i = 0; i < 10000000; i++) {
+ // This should run long enough to trigger OSR.
+ }
+ } while (false);
+ } while (false);
+}
+
+f();
+assertTrue(%GetOptimizationStatus(f) != 2);
+
+
+function g() {
+ for (var i = 0; i < 1; i++) { }
+
+ do {
+ do {
+ for (i = 0; i < 1; i++) { }
+ } while (false);
+ } while (false);
+
+ do {
+ do {
+ do {
+ do {
+ do {
+ do {
+ do {
+ do {
+ for (i = 0; i < 10000000; i++) { }
+ } while (false);
+ } while (false);
+ } while (false);
+ } while (false);
+ } while (false);
+ } while (false);
+ } while (false);
+ } while (false);
+}
+
+g();
+assertTrue(%GetOptimizationStatus(g) != 2);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-2624.js b/deps/v8/test/mjsunit/regress/regress-2624.js
new file mode 100644
index 000000000..2bfd7b258
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-2624.js
@@ -0,0 +1,36 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --print-code
+
+var source = '"snowmen invasion " + "';
+for(var i = 0; i < 800; i++) {
+ source += '\u2603';
+}
+source += '"';
+eval(source);
+
diff --git a/deps/v8/test/mjsunit/regress/regress-581.js b/deps/v8/test/mjsunit/regress/regress-581.js
new file mode 100644
index 000000000..65cd87de0
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-581.js
@@ -0,0 +1,46 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var pow30 = Math.pow(2, 30);
+var pow31 = Math.pow(2, 31);
+
+var a = [];
+a[pow31] = 31;
+
+assertEquals(pow31 + 1, a.length);
+assertThrows(function() { a.concat(a); }, RangeError);
+
+var b = [];
+b[pow31 - 2] = 32;
+var ab = a.concat(b);
+assertEquals(2 * pow31 - 1, ab.length);
+assertEquals(31, ab[pow31]);
+assertEquals(32, ab[2 * pow31 - 1]);
+
+var c = [];
+c[pow30] = 30;
+assertThrows(function() { c.concat(c, a); }, RangeError);
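Background for the limits exercised above (mjsunit helpers assumed): an array length is a 32-bit quantity, so the largest valid length is 2^32 - 1 and the largest valid index is 2^32 - 2; pushing the length past that limit throws a RangeError:

var limit = Math.pow(2, 32);

var arr = [];
arr[limit - 2] = "last valid index";     // largest index that still grows the array
assertEquals(limit - 1, arr.length);     // length saturates at 2^32 - 1

assertThrows(function() { arr.length = limit; }, RangeError);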
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-229923.js b/deps/v8/test/mjsunit/regress/regress-crbug-229923.js
new file mode 100644
index 000000000..95c0dedef
--- /dev/null
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-229923.js
@@ -0,0 +1,41 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following
+// disclaimer in the documentation and/or other materials provided
+// with the distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived
+// from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-externalize-string
+
+var slice = "slow path of JSON.stringify for sliced string".substring(1);
+assertEquals('"' + slice + '"', JSON.stringify(slice, null, 0));
+
+var parent = "external string turned into two byte";
+var slice_of_external = parent.substring(1);
+try {
+ // Turn the string to a two-byte external string, so that the sliced
+ // string looks like one-byte, but its parent is actually two-byte.
+ externalizeString(parent, true);
+} catch (e) { }
+assertEquals('"' + slice_of_external + '"',
+ JSON.stringify(slice_of_external, null, 0));
diff --git a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js b/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
index c0a71bf4a..4b355ae1a 100644
--- a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
+++ b/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
@@ -39,3 +39,13 @@ json1 = JSON.stringify(a);
json2 = JSON.stringify(a);
assertTrue(json1 == json2, "GC caused JSON.stringify to fail.");
+// Check that the slow path of JSON.stringify works correctly wrt GC.
+for (var i = 0; i < 100000; i++) {
+ var s = i.toString();
+ assertEquals('"' + s + '"', JSON.stringify(s, null, 0));
+}
+
+for (var i = 0; i < 100000; i++) {
+ var s = i.toString() + "\u2603";
+ assertEquals('"' + s + '"', JSON.stringify(s, null, 0));
+}
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index b11755322..d28520504 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -43,13 +43,16 @@
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_snapshot'],
+ 'dependencies': ['v8_base.<(v8_target_arch)', 'v8_snapshot'],
},
{
# The dependency on v8_base should come from a transitive
# dependency however the Android toolchain requires libv8_base.a
# to appear before libv8_snapshot.a so it's listed explicitly.
- 'dependencies': ['v8_base', 'v8_nosnapshot'],
+ 'dependencies': [
+ 'v8_base.<(v8_target_arch)',
+ 'v8_nosnapshot.<(v8_target_arch)',
+ ],
}],
['component=="shared_library"', {
'type': '<(component)',
@@ -105,10 +108,13 @@
'conditions': [
['want_separate_host_toolset==1', {
'toolsets': ['host', 'target'],
- 'dependencies': ['mksnapshot#host', 'js2c#host'],
+ 'dependencies': [
+ 'mksnapshot.<(v8_target_arch)#host',
+ 'js2c#host',
+ ],
}, {
'toolsets': ['target'],
- 'dependencies': ['mksnapshot', 'js2c'],
+ 'dependencies': ['mksnapshot.<(v8_target_arch)', 'js2c'],
}],
['component=="shared_library"', {
'defines': [
@@ -124,7 +130,7 @@
}],
],
'dependencies': [
- 'v8_base',
+ 'v8_base.<(v8_target_arch)',
],
'include_dirs+': [
'../../src',
@@ -138,7 +144,7 @@
{
'action_name': 'run_mksnapshot',
'inputs': [
- '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
+ '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot.<(v8_target_arch)<(EXECUTABLE_SUFFIX)',
],
'outputs': [
'<(INTERMEDIATE_DIR)/snapshot.cc',
@@ -149,40 +155,6 @@
'--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
],
},
- 'conditions': [
- ['v8_target_arch=="arm"', {
- # The following rules should be consistent with chromium's
- # common.gypi and V8's runtime rule to ensure they all generate
- # the same correct machine code. The following issue is about
- # V8's runtime rule about vfpv3 and neon:
- # http://code.google.com/p/v8/issues/detail?id=914
- 'conditions': [
- ['armv7==1', {
- # The ARM Architecture Manual mandates VFPv3 if NEON is
- # available.
- # V8 does not use d16-d31 unless explicitly enabled
- # (--enable_32dregs) or detected at run-time, so for vfpv3-d16,
- # we can also enable vfp3 for the better performance.
- 'conditions': [
- ['arm_neon!=1 and arm_fpu!="vfpv3" and arm_fpu!="vfpv3-d16"', {
- 'variables': {
- 'mksnapshot_flags': [
- '--noenable_vfp3',
- ],
- },
- }],
- ],
- },{ # else: armv7!=1
- 'variables': {
- 'mksnapshot_flags': [
- '--noenable_armv7',
- '--noenable_vfp3',
- ],
- },
- }],
- ],
- }],
- ],
'action': [
'<@(_inputs)',
'<@(mksnapshot_flags)',
@@ -192,10 +164,10 @@
],
},
{
- 'target_name': 'v8_nosnapshot',
+ 'target_name': 'v8_nosnapshot.<(v8_target_arch)',
'type': 'static_library',
'dependencies': [
- 'v8_base',
+ 'v8_base.<(v8_target_arch)',
],
'include_dirs+': [
'../../src',
@@ -222,7 +194,7 @@
]
},
{
- 'target_name': 'v8_base',
+ 'target_name': 'v8_base.<(v8_target_arch)',
'type': 'static_library',
'variables': {
'optimize': 'max',
@@ -441,6 +413,8 @@
'../../src/runtime.h',
'../../src/safepoint-table.cc',
'../../src/safepoint-table.h',
+ '../../src/sampler.cc',
+ '../../src/sampler.h',
'../../src/scanner-character-streams.cc',
'../../src/scanner-character-streams.h',
'../../src/scanner.cc',
@@ -741,15 +715,43 @@
]},
],
['OS=="win"', {
- 'sources': [
- '../../src/platform-win32.cc',
- '../../src/win32-math.cc',
- '../../src/win32-math.h',
- ],
- 'msvs_disabled_warnings': [4351, 4355, 4800],
- 'link_settings': {
- 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
+ 'variables': {
+ 'gyp_generators': '<!(echo $GYP_GENERATORS)',
},
+ 'conditions': [
+ ['gyp_generators=="make"', {
+ 'variables': {
+ 'build_env': '<!(uname -o)',
+ },
+ 'conditions': [
+ ['build_env=="Cygwin"', {
+ 'sources': [
+ '../../src/platform-cygwin.cc',
+ '../../src/platform-posix.cc',
+ ],
+ }, {
+ 'sources': [
+ '../../src/platform-win32.cc',
+ '../../src/win32-math.h',
+ '../../src/win32-math.cc',
+ ],
+ }],
+ ],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm', '-lws2_32' ],
+ },
+ }, {
+ 'sources': [
+ '../../src/platform-win32.cc',
+ '../../src/win32-math.h',
+ '../../src/win32-math.cc',
+ ],
+ 'msvs_disabled_warnings': [4351, 4355, 4800],
+ 'link_settings': {
+ 'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
+ },
+ }],
+ ],
}],
['component=="shared_library"', {
'defines': [
@@ -798,7 +800,8 @@
'../../src/proxy.js',
'../../src/collection.js',
'../../src/object-observe.js',
- '../../src/typedarray.js'
+ '../../src/typedarray.js',
+ '../../src/generator.js'
],
},
'actions': [
@@ -869,11 +872,11 @@
]
},
{
- 'target_name': 'mksnapshot',
+ 'target_name': 'mksnapshot.<(v8_target_arch)',
'type': 'executable',
'dependencies': [
- 'v8_base',
- 'v8_nosnapshot',
+ 'v8_base.<(v8_target_arch)',
+ 'v8_nosnapshot.<(v8_target_arch)',
],
'include_dirs+': [
'../../src',
@@ -920,73 +923,6 @@
}],
],
},
- {
- 'target_name': 'preparser_lib',
- 'type': 'static_library',
- 'include_dirs+': [
- '../../src',
- ],
- 'sources': [
- '../../include/v8-preparser.h',
- '../../include/v8stdint.h',
- '../../src/allocation.cc',
- '../../src/allocation.h',
- '../../src/atomicops.h',
- '../../src/atomicops_internals_x86_gcc.cc',
- '../../src/bignum.cc',
- '../../src/bignum.h',
- '../../src/bignum-dtoa.cc',
- '../../src/bignum-dtoa.h',
- '../../src/cached-powers.cc',
- '../../src/cached-powers.h',
- '../../src/char-predicates-inl.h',
- '../../src/char-predicates.h',
- '../../src/checks.h',
- '../../src/conversions-inl.h',
- '../../src/conversions.cc',
- '../../src/conversions.h',
- '../../src/diy-fp.cc',
- '../../src/diy-fp.h',
- '../../src/double.h',
- '../../src/dtoa.cc',
- '../../src/dtoa.h',
- '../../src/fast-dtoa.cc',
- '../../src/fast-dtoa.h',
- '../../src/fixed-dtoa.cc',
- '../../src/fixed-dtoa.h',
- '../../src/globals.h',
- '../../src/hashmap.h',
- '../../src/list-inl.h',
- '../../src/list.h',
- '../../src/once.cc',
- '../../src/once.h',
- '../../src/preparse-data-format.h',
- '../../src/preparse-data.cc',
- '../../src/preparse-data.h',
- '../../src/preparser.cc',
- '../../src/preparser.h',
- '../../src/preparser-api.cc',
- '../../src/scanner.cc',
- '../../src/scanner.h',
- '../../src/strtod.cc',
- '../../src/strtod.h',
- '../../src/token.cc',
- '../../src/token.h',
- '../../src/unicode-inl.h',
- '../../src/unicode.cc',
- '../../src/unicode.h',
- '../../src/utils-inl.h',
- '../../src/utils.cc',
- '../../src/utils.h',
- ],
- 'conditions': [
- ['OS=="win"', {
- 'sources': [
- '../../src/win32-math.cc',
- '../../src/win32-math.h',
- ]}],
- ],
- },
],
}, { # use_system_v8 != 0
'targets': [
diff --git a/deps/v8/tools/mingw-generate-makefiles.sh b/deps/v8/tools/mingw-generate-makefiles.sh
new file mode 100755
index 000000000..32af52d39
--- /dev/null
+++ b/deps/v8/tools/mingw-generate-makefiles.sh
@@ -0,0 +1,97 @@
+#!/bin/sh
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# Monkey-patch GYP.
+cat > build/gyp/gyp.mingw << EOF
+#!/usr/bin/env python
+
+# Copyright (c) 2009 Google Inc. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+# TODO(mark): sys.path manipulation is some temporary testing stuff.
+try:
+ import gyp
+except ImportError, e:
+ import os.path
+ sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), 'pylib'))
+ import gyp
+
+def MonkeyBuildFileTargets(target_list, build_file):
+ """From a target_list, returns the subset from the specified build_file.
+ """
+ build_file = build_file.replace('/', '\\\\')
+ return [p for p in target_list if gyp.common.BuildFile(p) == build_file]
+gyp.common.BuildFileTargets = MonkeyBuildFileTargets
+
+import gyp.generator.make
+import os
+def Monkey_ITIP(self):
+ """Returns the location of the final output for an installable target."""
+ sep = os.path.sep
+ # Xcode puts shared_library results into PRODUCT_DIR, and some gyp files
+ # rely on this. Emulate this behavior for mac.
+ if (self.type == 'shared_library' and
+ (self.flavor != 'mac' or self.toolset != 'target')):
+ # Install all shared libs into a common directory (per toolset) for
+ # convenient access with LD_LIBRARY_PATH.
+ return '\$(builddir)%slib.%s%s%s' % (sep, self.toolset, sep, self.alias)
+ return '\$(builddir)' + sep + self.alias
+gyp.generator.make.MakefileWriter._InstallableTargetInstallPath = Monkey_ITIP
+
+if __name__ == '__main__':
+ sys.exit(gyp.main(sys.argv[1:]))
+EOF
+
+# Delete old generated Makefiles.
+find out -name '*.mk' -or -name 'Makefile*' -exec rm {} \;
+
+# Generate fresh Makefiles.
+mv build/gyp/gyp build/gyp/gyp.original
+mv build/gyp/gyp.mingw build/gyp/gyp
+make out/Makefile.ia32
+mv build/gyp/gyp build/gyp/gyp.mingw
+mv build/gyp/gyp.original build/gyp/gyp
+
+# Patch generated Makefiles: replace most backslashes with forward slashes,
+# fix library names in linker flags.
+FILES=$(find out -name '*.mk' -or -name 'Makefile*')
+for F in $FILES ; do
+ echo "Patching $F..."
+ cp $F $F.orig
+ cat $F.orig \
+ | sed -e 's|\([)a-zA-Z0-9]\)\\\([a-zA-Z]\)|\1/\2|g' \
+ -e 's|\([)a-zA-Z0-9]\)\\\\\([a-zA-Z]\)|\1/\2|g' \
+ -e 's|'%s/n'|'%s\\\\n'|g' \
+ -e 's|-lwinmm\.lib|-lwinmm|g' \
+ -e 's|-lws2_32\.lib|-lws2_32|g' \
+ > $F
+ rm $F.orig
+done
diff --git a/deps/v8/tools/nacl-run.py b/deps/v8/tools/nacl-run.py
new file mode 100755
index 000000000..135172caf
--- /dev/null
+++ b/deps/v8/tools/nacl-run.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python
+#
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This script executes the passed command line using the Native Client
+# 'sel_ldr' container. It is derived from android-run.py.
+
+import os
+from os.path import join, dirname, abspath
+import subprocess
+import sys
+import tempfile
+
+def Check(output, errors):
+ failed = any([s.startswith('/system/bin/sh:') or s.startswith('ANDROID')
+ for s in output.split('\n')])
+ return 1 if failed else 0
+
+def Execute(cmdline):
+ (fd_out, outname) = tempfile.mkstemp()
+ (fd_err, errname) = tempfile.mkstemp()
+ process = subprocess.Popen(
+ args=cmdline,
+ shell=True,
+ stdout=fd_out,
+ stderr=fd_err,
+ )
+ exit_code = process.wait()
+ os.close(fd_out)
+ os.close(fd_err)
+ output = file(outname).read()
+ errors = file(errname).read()
+ os.unlink(outname)
+ os.unlink(errname)
+ sys.stdout.write(output)
+ sys.stderr.write(errors)
+ return exit_code or Check(output, errors)
+
+def Escape(arg):
+ def ShouldEscape():
+ for x in arg:
+ if not x.isalnum() and x != '-' and x != '_':
+ return True
+ return False
+
+ return arg if not ShouldEscape() else '"%s"' % (arg.replace('"', '\\"'))
+
+def WriteToTemporaryFile(data):
+ (fd, fname) = tempfile.mkstemp()
+ os.close(fd)
+ tmp_file = open(fname, "w")
+ tmp_file.write(data)
+ tmp_file.close()
+ return fname
+
+def GetNaClArchFromNexe(nexe):
+ try:
+ p = subprocess.Popen(['file', nexe], stdout=subprocess.PIPE)
+ out, err = p.communicate()
+ lines = out.split('\n')
+ if lines[0].find(": ELF 32-bit LSB executable, Intel 80386") > 0:
+ return "x86_32"
+ if lines[0].find(": ELF 64-bit LSB executable, x86-64") > 0:
+ return "x86_64"
+ except:
+ print 'file ' + sys.argv[1] + ' failed'
+ return None
+
+def GetNaClResources(nexe):
+ nacl_sdk_dir = os.environ["NACL_SDK_ROOT"]
+ nacl_arch = GetNaClArchFromNexe(nexe)
+ if sys.platform.startswith("linux"):
+ platform = "linux"
+ elif sys.platform == "darwin":
+ platform = "mac"
+ else:
+ print("NaCl V8 testing is supported on Linux and MacOS only.")
+ sys.exit(1)
+
+ if nacl_arch == "x86_64":
+ toolchain = platform + "_x86_glibc"
+ sel_ldr = "sel_ldr_x86_64"
+ irt = "irt_core_x86_64.nexe"
+ libdir = "lib64"
+ elif nacl_arch is "x86_32":
+ toolchain = platform + "_x86_glibc"
+ sel_ldr = "sel_ldr_x86_32"
+ irt = "irt_core_x86_32.nexe"
+ libdir = "lib32"
+ elif nacl_arch is "arm":
+ print("NaCl V8 ARM support is not ready yet.")
+ sys.exit(1)
+ else:
+ print("Invalid nexe %s" % nexe)
+ sys.exit(1)
+
+ nacl_sel_ldr = os.path.join(nacl_sdk_dir, "tools", sel_ldr)
+ nacl_irt = os.path.join(nacl_sdk_dir, "tools", irt)
+ nacl_ld_so = os.path.join(nacl_sdk_dir, "toolchain", toolchain,
+ "x86_64-nacl", libdir, "runnable-ld.so")
+ nacl_lib_path = os.path.join(nacl_sdk_dir, "toolchain", toolchain,
+ "x86_64-nacl", libdir)
+
+ return (nacl_sdk_dir, nacl_sel_ldr, nacl_irt, nacl_ld_so, nacl_lib_path)
+
+def Main():
+ if (len(sys.argv) == 1):
+ print("Usage: %s <command-to-run-on-device>" % sys.argv[0])
+ return 1
+
+ args = [Escape(arg) for arg in sys.argv[1:]]
+
+ (nacl_sdk_dir, nacl_sel_ldr, nacl_irt, nacl_ld_so,
+ nacl_lib_path) = GetNaClResources(sys.argv[1])
+
+ # sel_ldr Options:
+ # -c -c: disable validation (for performance)
+ # -a: allow file access
+ # -B <irt>: load the IRT
+ command = ' '.join([nacl_sel_ldr, '-c', '-c', '-a', '-B', nacl_irt, '--',
+ nacl_ld_so, '--library-path', nacl_lib_path] + args)
+ error_code = Execute(command)
+ return error_code
+
+if __name__ == '__main__':
+ sys.exit(Main())
diff --git a/deps/v8/tools/presubmit.py b/deps/v8/tools/presubmit.py
index efa8724e7..616505f3c 100755
--- a/deps/v8/tools/presubmit.py
+++ b/deps/v8/tools/presubmit.py
@@ -228,6 +228,15 @@ class CppLintProcessor(SourceFileProcessor):
def GetPathsToSearch(self):
return ['src', 'preparser', 'include', 'samples', join('test', 'cctest')]
+ def GetCpplintScript(self, prio_path):
+ for path in [prio_path] + os.environ["PATH"].split(os.pathsep):
+ path = path.strip('"')
+ cpplint = os.path.join(path, "cpplint.py")
+ if os.path.isfile(cpplint):
+ return cpplint
+
+ return None
+
def ProcessFiles(self, files, path):
good_files_cache = FileContentsCache('.cpplint-cache')
good_files_cache.Load()
@@ -237,10 +246,14 @@ class CppLintProcessor(SourceFileProcessor):
return True
filt = '-,' + ",".join(['+' + n for n in ENABLED_LINT_RULES])
- command = ['cpplint.py', '--filter', filt]
- local_cpplint = join(path, "tools", "cpplint.py")
- if exists(local_cpplint):
- command = ['python', local_cpplint, '--filter', filt]
+ command = [sys.executable, 'cpplint.py', '--filter', filt]
+ cpplint = self.GetCpplintScript(join(path, "tools"))
+ if cpplint is None:
+ print('Could not find cpplint.py. Make sure '
+ 'depot_tools is installed and in the path.')
+ sys.exit(1)
+
+ command = [sys.executable, cpplint, '--filter', filt]
commands = join([command + [file] for file in files])
count = multiprocessing.cpu_count()
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index cb64b45b0..959fe4857 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -68,12 +68,16 @@ SUPPORTED_ARCHS = ["android_arm",
"arm",
"ia32",
"mipsel",
+ "nacl_ia32",
+ "nacl_x64",
"x64"]
# Double the timeout for these:
SLOW_ARCHS = ["android_arm",
"android_ia32",
"arm",
- "mipsel"]
+ "mipsel",
+ "nacl_ia32",
+ "nacl_x64"]
def BuildOptions():
@@ -145,6 +149,10 @@ def BuildOptions():
default=False, action="store_true")
result.add_option("--warn-unused", help="Report unused rules",
default=False, action="store_true")
+ result.add_option("--junitout", help="File name of the JUnit output")
+ result.add_option("--junittestsuite",
+ help="The testsuite name in the JUnit output file",
+ default="v8tests")
return result
@@ -332,6 +340,9 @@ def Execute(arch, mode, args, options, suites, workspace):
try:
start_time = time.time()
progress_indicator = progress.PROGRESS_INDICATORS[options.progress]()
+ if options.junitout:
+ progress_indicator = progress.JUnitTestProgressIndicator(
+ progress_indicator, options.junitout, options.junittestsuite)
run_networked = not options.no_network
if not run_networked:
diff --git a/deps/v8/tools/testrunner/local/junit_output.py b/deps/v8/tools/testrunner/local/junit_output.py
new file mode 100644
index 000000000..437adb178
--- /dev/null
+++ b/deps/v8/tools/testrunner/local/junit_output.py
@@ -0,0 +1,49 @@
+# Copyright 2013 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import xml.etree.ElementTree as xml
+
+
+class JUnitTestOutput:
+ def __init__(self, test_suite_name):
+ self.root = xml.Element("testsuite")
+ self.root.attrib["name"] = test_suite_name
+
+ def HasRunTest(self, test_name, test_duration, test_failure):
+ testCaseElement = xml.Element("testcase")
+ testCaseElement.attrib["name"] = " ".join(test_name)
+ testCaseElement.attrib["time"] = str(round(test_duration, 3))
+ if len(test_failure):
+ failureElement = xml.Element("failure")
+ failureElement.text = test_failure
+ testCaseElement.append(failureElement)
+ self.root.append(testCaseElement)
+
+ def FinishAndWrite(self, file):
+ xml.ElementTree(self.root).write(file, "UTF-8")
+
diff --git a/deps/v8/tools/testrunner/local/progress.py b/deps/v8/tools/testrunner/local/progress.py
index 9075a954f..c13c0eb54 100644
--- a/deps/v8/tools/testrunner/local/progress.py
+++ b/deps/v8/tools/testrunner/local/progress.py
@@ -29,6 +29,8 @@
import sys
import time
+from . import junit_output
+
def EscapeCommand(command):
parts = []
for part in command:
@@ -230,6 +232,50 @@ class MonochromeProgressIndicator(CompactProgressIndicator):
print ("\r" + (" " * last_line_length) + "\r"),
+class JUnitTestProgressIndicator(ProgressIndicator):
+
+ def __init__(self, progress_indicator, junitout, junittestsuite):
+ self.progress_indicator = progress_indicator
+ self.outputter = junit_output.JUnitTestOutput(junittestsuite)
+ if junitout:
+ self.outfile = open(junitout, "w")
+ else:
+ self.outfile = sys.stdout
+
+ def Starting(self):
+ self.progress_indicator.runner = self.runner
+ self.progress_indicator.Starting()
+
+ def Done(self):
+ self.progress_indicator.Done()
+ self.outputter.FinishAndWrite(self.outfile)
+ if self.outfile != sys.stdout:
+ self.outfile.close()
+
+ def AboutToRun(self, test):
+ self.progress_indicator.AboutToRun(test)
+
+ def HasRun(self, test):
+ self.progress_indicator.HasRun(test)
+ fail_text = ""
+ if test.suite.HasUnexpectedOutput(test):
+ stdout = test.output.stdout.strip()
+ if len(stdout):
+ fail_text += "stdout:\n%s\n" % stdout
+ stderr = test.output.stderr.strip()
+ if len(stderr):
+ fail_text += "stderr:\n%s\n" % stderr
+ fail_text += "Command: %s" % EscapeCommand(self.runner.GetCommand(test))
+ if test.output.HasCrashed():
+ fail_text += "exit code: %d\n--- CRASHED ---" % test.output.exit_code
+ if test.output.HasTimedOut():
+ fail_text += "--- TIMEOUT ---"
+ self.outputter.HasRunTest(
+ [test.GetLabel()] + self.runner.context.mode_flags + test.flags,
+ test.duration,
+ fail_text)
+
+
PROGRESS_INDICATORS = {
'verbose': VerboseProgressIndicator,
'dots': DotsProgressIndicator,
diff --git a/deps/v8/tools/testrunner/local/statusfile.py b/deps/v8/tools/testrunner/local/statusfile.py
index bf1de45f6..634fe6a08 100644
--- a/deps/v8/tools/testrunner/local/statusfile.py
+++ b/deps/v8/tools/testrunner/local/statusfile.py
@@ -59,7 +59,7 @@ DEFS = {FAIL_OK: [FAIL, OKAY],
# Support arches, modes to be written as keywords instead of strings.
VARIABLES = {ALWAYS: True}
for var in ["debug", "release", "android_arm", "android_ia32", "arm", "ia32",
- "mipsel", "x64"]:
+ "mipsel", "x64", "nacl_ia32", "nacl_x64"]:
VARIABLES[var] = var
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index c9ee1011f..0ffe7342a 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -170,7 +170,7 @@ function TickProcessor(
processor: this.processSnapshotPosition },
'tick': {
parsers: [parseInt, parseInt, parseInt, parseInt,
- parseInt, parseInt, 'var-args'],
+ parseInt, 'var-args'],
processor: this.processTick },
'heap-sample-begin': { parsers: [null, null, parseInt],
processor: this.processHeapSampleBegin },
@@ -368,8 +368,7 @@ TickProcessor.prototype.includeTick = function(vmState) {
TickProcessor.prototype.processTick = function(pc,
sp,
ns_since_start,
- is_external_callback,
- tos_or_external_callback,
+ external_callback,
vmState,
stack) {
this.distortion += this.distortion_per_entry;
@@ -383,23 +382,15 @@ TickProcessor.prototype.processTick = function(pc,
this.ticks_.excluded++;
return;
}
- if (is_external_callback) {
+ if (external_callback) {
// Don't use PC when in external callback code, as it can point
// inside callback's code, and we will erroneously report
// that a callback calls itself. Instead we use tos_or_external_callback,
// as simply resetting PC will produce unaccounted ticks.
- pc = tos_or_external_callback;
- tos_or_external_callback = 0;
- } else if (tos_or_external_callback) {
- // Find out, if top of stack was pointing inside a JS function
- // meaning that we have encountered a frameless invocation.
- var funcEntry = this.profile_.findEntry(tos_or_external_callback);
- if (!funcEntry || !funcEntry.isJSFunction || !funcEntry.isJSFunction()) {
- tos_or_external_callback = 0;
- }
- }
+ pc = 0;
+ }
- this.profile_.recordTick(this.processStack(pc, tos_or_external_callback, stack));
+ this.profile_.recordTick(this.processStack(pc, external_callback, stack));
};