author     Ben Noordhuis <info@bnoordhuis.nl>    2013-02-25 22:45:23 +0100
committer  Ben Noordhuis <info@bnoordhuis.nl>    2013-02-25 23:45:02 +0100
commit     b15a10e7a014674ef6f71c51ad84032fb7b802e2 (patch)
tree       3bb04a6cb05c7a37c385eda4521b8a9e7bcd736f /deps/v8
parent     34046084c0665c8bb2dfd84683dcf29d7ffbad2d (diff)
download   node-b15a10e7a014674ef6f71c51ad84032fb7b802e2.tar.gz
deps: downgrade v8 to 3.14.5
V8 3.15 and newer have stability and performance issues. Roll back to a known-good version.
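For embedders who want to confirm which V8 they actually link after this rollback, a minimal, hypothetical check; only v8::V8::GetVersion() is real API here, the surrounding program is illustrative:

    #include <v8.h>
    #include <cstdio>

    int main() {
      // Should report a 3.14.5-family version string after this commit.
      std::printf("V8 version: %s\n", v8::V8::GetVersion());
      return 0;
    }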
Diffstat (limited to 'deps/v8')
-rw-r--r--  deps/v8/.gitignore | 5
-rw-r--r--  deps/v8/AUTHORS | 3
-rw-r--r--  deps/v8/ChangeLog | 158
-rw-r--r--  deps/v8/build/android.gypi | 9
-rw-r--r--  deps/v8/build/common.gypi | 52
-rw-r--r--  deps/v8/include/v8-profiler.h | 17
-rw-r--r--  deps/v8/include/v8.h | 630
-rw-r--r--  deps/v8/samples/shell.cc | 27
-rw-r--r--  deps/v8/src/accessors.cc | 104
-rw-r--r--  deps/v8/src/api.cc | 380
-rw-r--r--  deps/v8/src/api.h | 6
-rw-r--r--  deps/v8/src/arm/assembler-arm-inl.h | 31
-rw-r--r--  deps/v8/src/arm/assembler-arm.cc | 71
-rw-r--r--  deps/v8/src/arm/assembler-arm.h | 54
-rw-r--r--  deps/v8/src/arm/builtins-arm.cc | 33
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.cc | 894
-rw-r--r--  deps/v8/src/arm/code-stubs-arm.h | 123
-rw-r--r--  deps/v8/src/arm/codegen-arm.cc | 256
-rw-r--r--  deps/v8/src/arm/codegen-arm.h | 16
-rw-r--r--  deps/v8/src/arm/constants-arm.h | 15
-rw-r--r--  deps/v8/src/arm/deoptimizer-arm.cc | 32
-rw-r--r--  deps/v8/src/arm/disasm-arm.cc | 12
-rw-r--r--  deps/v8/src/arm/full-codegen-arm.cc | 212
-rw-r--r--  deps/v8/src/arm/ic-arm.cc | 38
-rw-r--r--  deps/v8/src/arm/lithium-arm.cc | 257
-rw-r--r--  deps/v8/src/arm/lithium-arm.h | 213
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.cc | 569
-rw-r--r--  deps/v8/src/arm/lithium-codegen-arm.h | 6
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.cc | 86
-rw-r--r--  deps/v8/src/arm/macro-assembler-arm.h | 30
-rw-r--r--  deps/v8/src/arm/regexp-macro-assembler-arm.cc | 4
-rw-r--r--  deps/v8/src/arm/simulator-arm.cc | 112
-rw-r--r--  deps/v8/src/arm/simulator-arm.h | 4
-rw-r--r--  deps/v8/src/arm/stub-cache-arm.cc | 132
-rw-r--r--  deps/v8/src/array.js | 18
-rw-r--r--  deps/v8/src/assembler.cc | 187
-rw-r--r--  deps/v8/src/assembler.h | 75
-rw-r--r--  deps/v8/src/ast.cc | 20
-rw-r--r--  deps/v8/src/ast.h | 38
-rw-r--r--  deps/v8/src/atomicops.h | 4
-rw-r--r--  deps/v8/src/atomicops_internals_tsan.h | 335
-rw-r--r--  deps/v8/src/bootstrapper.cc | 23
-rw-r--r--  deps/v8/src/bootstrapper.h | 2
-rw-r--r--  deps/v8/src/builtins.cc | 702
-rw-r--r--  deps/v8/src/builtins.h | 31
-rw-r--r--  deps/v8/src/code-stubs.cc | 181
-rw-r--r--  deps/v8/src/code-stubs.h | 258
-rw-r--r--  deps/v8/src/codegen.cc | 1
-rw-r--r--  deps/v8/src/codegen.h | 14
-rw-r--r--  deps/v8/src/collection.js | 46
-rw-r--r--  deps/v8/src/compilation-cache.cc | 2
-rw-r--r--  deps/v8/src/compiler.cc | 119
-rw-r--r--  deps/v8/src/compiler.h | 29
-rw-r--r--  deps/v8/src/contexts.cc | 25
-rw-r--r--  deps/v8/src/contexts.h | 24
-rw-r--r--  deps/v8/src/counters.cc | 7
-rw-r--r--  deps/v8/src/d8.cc | 407
-rw-r--r--  deps/v8/src/d8.h | 28
-rw-r--r--  deps/v8/src/date.js | 2
-rw-r--r--  deps/v8/src/debug-debugger.js | 121
-rw-r--r--  deps/v8/src/debug.cc | 16
-rw-r--r--  deps/v8/src/deoptimizer.cc | 114
-rw-r--r--  deps/v8/src/deoptimizer.h | 29
-rw-r--r--  deps/v8/src/elements-kind.cc | 9
-rw-r--r--  deps/v8/src/elements-kind.h | 8
-rw-r--r--  deps/v8/src/elements.cc | 546
-rw-r--r--  deps/v8/src/elements.h | 43
-rw-r--r--  deps/v8/src/execution.cc | 34
-rw-r--r--  deps/v8/src/execution.h | 7
-rw-r--r--  deps/v8/src/extensions/externalize-string-extension.cc | 5
-rw-r--r--  deps/v8/src/extensions/gc-extension.cc | 6
-rw-r--r--  deps/v8/src/factory.cc | 31
-rw-r--r--  deps/v8/src/factory.h | 9
-rw-r--r--  deps/v8/src/flag-definitions.h | 29
-rw-r--r--  deps/v8/src/frames.cc | 4
-rw-r--r--  deps/v8/src/full-codegen.cc | 240
-rw-r--r--  deps/v8/src/full-codegen.h | 19
-rw-r--r--  deps/v8/src/global-handles.cc | 95
-rw-r--r--  deps/v8/src/global-handles.h | 20
-rw-r--r--  deps/v8/src/handles.cc | 29
-rw-r--r--  deps/v8/src/handles.h | 11
-rw-r--r--  deps/v8/src/heap-inl.h | 14
-rw-r--r--  deps/v8/src/heap-profiler.cc | 43
-rw-r--r--  deps/v8/src/heap-profiler.h | 32
-rw-r--r--  deps/v8/src/heap.cc | 439
-rw-r--r--  deps/v8/src/heap.h | 62
-rw-r--r--  deps/v8/src/hydrogen-instructions.cc | 498
-rw-r--r--  deps/v8/src/hydrogen-instructions.h | 688
-rw-r--r--  deps/v8/src/hydrogen.cc | 948
-rw-r--r--  deps/v8/src/hydrogen.h | 72
-rw-r--r--  deps/v8/src/ia32/assembler-ia32-inl.h | 32
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.cc | 107
-rw-r--r--  deps/v8/src/ia32/assembler-ia32.h | 35
-rw-r--r--  deps/v8/src/ia32/builtins-ia32.cc | 36
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.cc | 812
-rw-r--r--  deps/v8/src/ia32/code-stubs-ia32.h | 90
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.cc | 203
-rw-r--r--  deps/v8/src/ia32/codegen-ia32.h | 14
-rw-r--r--  deps/v8/src/ia32/deoptimizer-ia32.cc | 30
-rw-r--r--  deps/v8/src/ia32/disasm-ia32.cc | 9
-rw-r--r--  deps/v8/src/ia32/full-codegen-ia32.cc | 198
-rw-r--r--  deps/v8/src/ia32/ic-ia32.cc | 36
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.cc | 359
-rw-r--r--  deps/v8/src/ia32/lithium-codegen-ia32.h | 6
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.cc | 220
-rw-r--r--  deps/v8/src/ia32/lithium-ia32.h | 174
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.cc | 57
-rw-r--r--  deps/v8/src/ia32/macro-assembler-ia32.h | 4
-rw-r--r--  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc | 4
-rw-r--r--  deps/v8/src/ia32/stub-cache-ia32.cc | 100
-rw-r--r--  deps/v8/src/ic-inl.h | 3
-rw-r--r--  deps/v8/src/ic.cc | 339
-rw-r--r--  deps/v8/src/ic.h | 35
-rw-r--r--  deps/v8/src/incremental-marking-inl.h | 27
-rw-r--r--  deps/v8/src/incremental-marking.cc | 261
-rw-r--r--  deps/v8/src/incremental-marking.h | 21
-rw-r--r--  deps/v8/src/interface.cc | 13
-rw-r--r--  deps/v8/src/interface.h | 35
-rw-r--r--  deps/v8/src/isolate.cc | 178
-rw-r--r--  deps/v8/src/isolate.h | 21
-rw-r--r--  deps/v8/src/json-parser.h | 152
-rw-r--r--  deps/v8/src/json-stringifier.h | 748
-rw-r--r--  deps/v8/src/json.js | 143
-rw-r--r--  deps/v8/src/jsregexp.cc | 6
-rw-r--r--  deps/v8/src/lithium.h | 4
-rw-r--r--  deps/v8/src/liveedit-debugger.js | 37
-rw-r--r--  deps/v8/src/liveedit.cc | 196
-rw-r--r--  deps/v8/src/liveobjectlist.cc | 2
-rw-r--r--  deps/v8/src/log-utils.cc | 9
-rw-r--r--  deps/v8/src/log.cc | 198
-rw-r--r--  deps/v8/src/log.h | 44
-rw-r--r--  deps/v8/src/macros.py | 2
-rw-r--r--  deps/v8/src/mark-compact.cc | 237
-rw-r--r--  deps/v8/src/mark-compact.h | 43
-rw-r--r--  deps/v8/src/math.js | 30
-rw-r--r--  deps/v8/src/messages.cc | 4
-rw-r--r--  deps/v8/src/messages.js | 420
-rw-r--r--  deps/v8/src/mips/assembler-mips-inl.h | 23
-rw-r--r--  deps/v8/src/mips/assembler-mips.cc | 54
-rw-r--r--  deps/v8/src/mips/assembler-mips.h | 25
-rw-r--r--  deps/v8/src/mips/builtins-mips.cc | 42
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.cc | 909
-rw-r--r--  deps/v8/src/mips/code-stubs-mips.h | 127
-rw-r--r--  deps/v8/src/mips/codegen-mips.cc | 266
-rw-r--r--  deps/v8/src/mips/codegen-mips.h | 16
-rw-r--r--  deps/v8/src/mips/deoptimizer-mips.cc | 8
-rw-r--r--  deps/v8/src/mips/full-codegen-mips.cc | 204
-rw-r--r--  deps/v8/src/mips/ic-mips.cc | 39
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.cc | 576
-rw-r--r--  deps/v8/src/mips/lithium-codegen-mips.h | 6
-rw-r--r--  deps/v8/src/mips/lithium-mips.cc | 222
-rw-r--r--  deps/v8/src/mips/lithium-mips.h | 176
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.cc | 92
-rw-r--r--  deps/v8/src/mips/macro-assembler-mips.h | 26
-rw-r--r--  deps/v8/src/mips/regexp-macro-assembler-mips.cc | 4
-rw-r--r--  deps/v8/src/mips/simulator-mips.cc | 110
-rw-r--r--  deps/v8/src/mips/simulator-mips.h | 5
-rw-r--r--  deps/v8/src/mips/stub-cache-mips.cc | 194
-rw-r--r--  deps/v8/src/mirror-debugger.js | 25
-rw-r--r--  deps/v8/src/object-observe.js | 242
-rw-r--r--  deps/v8/src/objects-debug.cc | 9
-rw-r--r--  deps/v8/src/objects-inl.h | 517
-rw-r--r--  deps/v8/src/objects-printer.cc | 11
-rw-r--r--  deps/v8/src/objects-visiting-inl.h | 69
-rw-r--r--  deps/v8/src/objects-visiting.cc | 4
-rw-r--r--  deps/v8/src/objects-visiting.h | 17
-rw-r--r--  deps/v8/src/objects.cc | 1520
-rw-r--r--  deps/v8/src/objects.h | 357
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.cc | 25
-rw-r--r--  deps/v8/src/optimizing-compiler-thread.h | 20
-rw-r--r--  deps/v8/src/parser.cc | 128
-rw-r--r--  deps/v8/src/parser.h | 1
-rw-r--r--  deps/v8/src/platform-cygwin.cc | 28
-rw-r--r--  deps/v8/src/platform-freebsd.cc | 51
-rw-r--r--  deps/v8/src/platform-linux.cc | 57
-rw-r--r--  deps/v8/src/platform-macos.cc | 27
-rw-r--r--  deps/v8/src/platform-nullos.cc | 6
-rw-r--r--  deps/v8/src/platform-openbsd.cc | 58
-rw-r--r--  deps/v8/src/platform-posix.cc | 26
-rw-r--r--  deps/v8/src/platform-solaris.cc | 67
-rw-r--r--  deps/v8/src/platform-win32.cc | 37
-rw-r--r--  deps/v8/src/platform.h | 11
-rw-r--r--  deps/v8/src/preparser.h | 4
-rw-r--r--  deps/v8/src/prettyprinter.cc | 15
-rw-r--r--  deps/v8/src/profile-generator-inl.h | 2
-rw-r--r--  deps/v8/src/profile-generator.cc | 102
-rw-r--r--  deps/v8/src/profile-generator.h | 7
-rw-r--r--  deps/v8/src/property-details.h | 4
-rw-r--r--  deps/v8/src/property.cc | 2
-rw-r--r--  deps/v8/src/property.h | 47
-rw-r--r--  deps/v8/src/proxy.js | 9
-rw-r--r--  deps/v8/src/regexp-macro-assembler.cc | 8
-rw-r--r--  deps/v8/src/regexp-stack.cc | 1
-rw-r--r--  deps/v8/src/regexp.js | 8
-rw-r--r--  deps/v8/src/rewriter.cc | 9
-rw-r--r--  deps/v8/src/runtime-profiler.cc | 20
-rw-r--r--  deps/v8/src/runtime-profiler.h | 2
-rw-r--r--  deps/v8/src/runtime.cc | 1381
-rw-r--r--  deps/v8/src/runtime.h | 36
-rw-r--r--  deps/v8/src/scopeinfo.cc | 29
-rw-r--r--  deps/v8/src/scopeinfo.h | 67
-rw-r--r--  deps/v8/src/scopes.cc | 170
-rw-r--r--  deps/v8/src/scopes.h | 31
-rw-r--r--  deps/v8/src/serialize.cc | 6
-rw-r--r--  deps/v8/src/spaces-inl.h | 13
-rw-r--r--  deps/v8/src/spaces.cc | 63
-rw-r--r--  deps/v8/src/spaces.h | 71
-rw-r--r--  deps/v8/src/store-buffer.h | 4
-rw-r--r--  deps/v8/src/string.js | 59
-rw-r--r--  deps/v8/src/stub-cache.cc | 84
-rw-r--r--  deps/v8/src/stub-cache.h | 16
-rw-r--r--  deps/v8/src/token.h | 1
-rw-r--r--  deps/v8/src/type-info.cc | 165
-rw-r--r--  deps/v8/src/type-info.h | 12
-rw-r--r--  deps/v8/src/uri.js | 86
-rw-r--r--  deps/v8/src/v8-counters.cc | 11
-rw-r--r--  deps/v8/src/v8-counters.h | 17
-rw-r--r--  deps/v8/src/v8.cc | 16
-rw-r--r--  deps/v8/src/v8conversions.cc | 4
-rw-r--r--  deps/v8/src/v8globals.h | 43
-rw-r--r--  deps/v8/src/v8natives.js | 88
-rw-r--r--  deps/v8/src/v8utils.h | 2
-rw-r--r--  deps/v8/src/variables.cc | 6
-rw-r--r--  deps/v8/src/variables.h | 4
-rw-r--r--  deps/v8/src/version.cc | 6
-rw-r--r--  deps/v8/src/vm-state-inl.h | 12
-rw-r--r--  deps/v8/src/x64/assembler-x64-inl.h | 28
-rw-r--r--  deps/v8/src/x64/assembler-x64.cc | 101
-rw-r--r--  deps/v8/src/x64/assembler-x64.h | 40
-rw-r--r--  deps/v8/src/x64/builtins-x64.cc | 40
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.cc | 654
-rw-r--r--  deps/v8/src/x64/code-stubs-x64.h | 96
-rw-r--r--  deps/v8/src/x64/codegen-x64.cc | 189
-rw-r--r--  deps/v8/src/x64/codegen-x64.h | 15
-rw-r--r--  deps/v8/src/x64/deoptimizer-x64.cc | 30
-rw-r--r--  deps/v8/src/x64/disasm-x64.cc | 12
-rw-r--r--  deps/v8/src/x64/full-codegen-x64.cc | 196
-rw-r--r--  deps/v8/src/x64/ic-x64.cc | 35
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.cc | 521
-rw-r--r--  deps/v8/src/x64/lithium-codegen-x64.h | 6
-rw-r--r--  deps/v8/src/x64/lithium-x64.cc | 215
-rw-r--r--  deps/v8/src/x64/lithium-x64.h | 160
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.cc | 57
-rw-r--r--  deps/v8/src/x64/macro-assembler-x64.h | 3
-rw-r--r--  deps/v8/src/x64/regexp-macro-assembler-x64.cc | 4
-rw-r--r--  deps/v8/src/x64/stub-cache-x64.cc | 101
-rw-r--r--  deps/v8/test/cctest/cctest.gyp | 1
-rw-r--r--  deps/v8/test/cctest/cctest.h | 20
-rw-r--r--  deps/v8/test/cctest/test-accessors.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-alloc.cc | 26
-rw-r--r--  deps/v8/test/cctest/test-api.cc | 421
-rw-r--r--  deps/v8/test/cctest/test-assembler-arm.cc | 4
-rw-r--r--  deps/v8/test/cctest/test-compiler.cc | 3
-rw-r--r--  deps/v8/test/cctest/test-debug.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-decls.cc | 170
-rw-r--r--  deps/v8/test/cctest/test-dictionary.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-disasm-arm.cc | 6
-rw-r--r--  deps/v8/test/cctest/test-heap-profiler.cc | 28
-rw-r--r--  deps/v8/test/cctest/test-heap.cc | 493
-rw-r--r--  deps/v8/test/cctest/test-lockers.cc | 7
-rw-r--r--  deps/v8/test/cctest/test-log.cc | 8
-rw-r--r--  deps/v8/test/cctest/test-mark-compact.cc | 1
-rw-r--r--  deps/v8/test/cctest/test-object-observe.cc | 280
-rwxr-xr-x  deps/v8/test/cctest/test-parsing.cc | 86
-rw-r--r--  deps/v8/test/cctest/test-regexp.cc | 20
-rw-r--r--  deps/v8/test/mjsunit/array-bounds-check-removal.js | 24
-rw-r--r--  deps/v8/test/mjsunit/array-natives-elements.js | 307
-rwxr-xr-x  deps/v8/test/mjsunit/array-reduce.js | 16
-rw-r--r--  deps/v8/test/mjsunit/array-slice.js | 12
-rw-r--r--  deps/v8/test/mjsunit/array-store-and-grow.js | 5
-rw-r--r--  deps/v8/test/mjsunit/compiler/multiply-add.js | 69
-rw-r--r--  deps/v8/test/mjsunit/compiler/proto-chain-load.js | 44
-rw-r--r--  deps/v8/test/mjsunit/compiler/rotate.js | 224
-rw-r--r--  deps/v8/test/mjsunit/debug-liveedit-compile-error.js | 60
-rw-r--r--  deps/v8/test/mjsunit/debug-liveedit-literals.js | 94
-rw-r--r--  deps/v8/test/mjsunit/debug-set-variable-value.js | 176
-rw-r--r--  deps/v8/test/mjsunit/elements-kind.js | 3
-rw-r--r--  deps/v8/test/mjsunit/elements-length-no-holey.js | 33
-rw-r--r--  deps/v8/test/mjsunit/error-accessors.js | 54
-rw-r--r--  deps/v8/test/mjsunit/error-constructors.js | 15
-rw-r--r--  deps/v8/test/mjsunit/function-call.js | 32
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part1.js | 9
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part2.js | 9
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part3.js | 9
-rw-r--r--  deps/v8/test/mjsunit/fuzz-natives-part4.js | 9
-rw-r--r--  deps/v8/test/mjsunit/harmony/collections.js | 58
-rw-r--r--  deps/v8/test/mjsunit/harmony/module-linking.js | 2
-rw-r--r--  deps/v8/test/mjsunit/harmony/object-observe.js | 873
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies-json.js | 178
-rw-r--r--  deps/v8/test/mjsunit/harmony/proxies.js | 5
-rw-r--r--  deps/v8/test/mjsunit/json-parser-recursive.js | 33
-rw-r--r--  deps/v8/test/mjsunit/json-stringify-recursive.js | 52
-rw-r--r--  deps/v8/test/mjsunit/json.js | 36
-rw-r--r--  deps/v8/test/mjsunit/json2.js | 153
-rw-r--r--  deps/v8/test/mjsunit/manual-parallel-recompile.js | 79
-rw-r--r--  deps/v8/test/mjsunit/math-exp-precision.js | 64
-rw-r--r--  deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js | 1
-rw-r--r--  deps/v8/test/mjsunit/mjsunit.status | 10
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-121407.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-164442.js | 45
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-166553.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1692.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-1980.js | 2
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2263.js | 30
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2315.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2398.js | 41
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2410.js | 36
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2416.js | 75
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2433.js | 36
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2437.js | 156
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2438.js | 52
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2443.js | 129
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2444.js | 120
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2489.js | 50
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-2499.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-492.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-135066.js | 14
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-157019.js | 54
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-157520.js | 38
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-158185.js | 39
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-160010.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-162085.js | 71
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-170856.js | 33
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-crbug-18639.js | 14
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-delete-empty-double.js | 40
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js | 41
-rw-r--r--  deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js | 37
-rw-r--r--  deps/v8/test/mjsunit/shift-for-integer-div.js | 59
-rw-r--r--  deps/v8/test/mjsunit/stack-traces-overflow.js | 122
-rw-r--r--  deps/v8/test/mjsunit/strict-mode.js | 47
-rw-r--r--  deps/v8/test/mjsunit/string-natives.js | 72
-rw-r--r--  deps/v8/test/mjsunit/string-split.js | 17
-rw-r--r--  deps/v8/test/mjsunit/testcfg.py | 3
-rw-r--r--  deps/v8/test/mjsunit/tools/tickprocessor-test.log | 38
-rw-r--r--  deps/v8/test/mjsunit/uri.js | 12
-rw-r--r--  deps/v8/test/mozilla/mozilla.status | 10
-rw-r--r--  deps/v8/test/test262/README | 4
-rw-r--r--  deps/v8/test/test262/test262.status | 12
-rw-r--r--  deps/v8/test/test262/testcfg.py | 11
-rw-r--r--  deps/v8/tools/gen-postmortem-metadata.py | 14
-rwxr-xr-x  deps/v8/tools/grokdump.py | 151
-rw-r--r--  deps/v8/tools/gyp/v8.gyp | 10
-rwxr-xr-x  deps/v8/tools/ll_prof.py | 63
-rwxr-xr-x  deps/v8/tools/plot-timer-events | 71
-rw-r--r--  deps/v8/tools/plot-timer-events.js | 576
-rwxr-xr-x  deps/v8/tools/run-llprof.sh | 69
-rwxr-xr-x  deps/v8/tools/run-tests.py | 13
-rw-r--r--  deps/v8/tools/tick-processor.html | 168
-rw-r--r--  deps/v8/tools/tickprocessor.js | 16
349 files changed, 11394 insertions, 24676 deletions
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index fe8425f02..0bf931335 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -18,7 +18,6 @@
#*#
*~
.cpplint-cache
-.d8_history
d8
d8_g
shell
@@ -51,7 +50,3 @@ shell_g
/xcodebuild
TAGS
*.Makefile
-GTAGS
-GRTAGS
-GSYMS
-GPATH
diff --git a/deps/v8/AUTHORS b/deps/v8/AUTHORS
index c279e7c2d..1156d9495 100644
--- a/deps/v8/AUTHORS
+++ b/deps/v8/AUTHORS
@@ -20,7 +20,6 @@ Burcu Dogan <burcujdogan@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com>
-Derek J Conrod <dconrod@codeaurora.org>
Dineel D Sule <dsule@codeaurora.org>
Erich Ocean <erich.ocean@me.com>
Fedor Indutny <fedor@indutny.com>
@@ -45,7 +44,6 @@ Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
-Rajeev R Krithivasan <rkrithiv@codeaurora.org>
Rene Rebe <rene@exactcode.de>
Robert Mustacchi <rm@fingolfin.org>
Rodolph Perfetta <rodolph.perfetta@arm.com>
@@ -55,7 +53,6 @@ Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Tobias Burnus <burnus@net-b.de>
Vlad Burlik <vladbph@gmail.com>
-Xi Qian <xi.qian@intel.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>
Zhongping Wang <kewpie.w.zp@gmail.com>
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 52601a467..7c435c8b6 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,161 +1,3 @@
-2012-12-10: Version 3.15.11
-
- Define CAN_USE_VFP2/3_INSTRUCTIONS based on arm_neon and arm_fpu GYP
- flags.
-
- Performance and stability improvements on all platforms.
-
-
-2012-12-07: Version 3.15.10
-
- Enabled optimisation of functions inside eval. (issue 2315)
-
- Fixed spec violations in methods of Number.prototype. (issue 2443)
-
- Added GCTracer metrics for a scavenger GC for DOM wrappers.
-
- Performance and stability improvements on all platforms.
-
-
-2012-12-06: Version 3.15.9
-
- Fixed candidate eviction in code flusher.
- (Chromium issue 159140)
-
- Iterate through all arguments for side effects in Math.min/max.
- (issue 2444)
-
- Fixed spec violations related to regexp.lastIndex
- (issue 2437, issue 2438)
-
- Performance and stability improvements on all platforms.
-
-
-2012-12-04: Version 3.15.8
-
- Enforced stack allocation of TryCatch blocks.
- (issue 2166,chromium:152389)
-
- Fixed external exceptions in external try-catch handlers.
- (issue 2166)
-
- Activated incremental code flushing by default.
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-30: Version 3.15.7
-
- Activated code aging by default.
-
- Included more information in --prof log.
-
- Removed eager sweeping for lazy swept spaces. Try to find in
- SlowAllocateRaw a bounded number of times a big enough memory slot.
- (issue 2194)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-26: Version 3.15.6
-
- Ensure double arrays are filled with holes when extended from
- variations of empty arrays. (Chromium issue 162085)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-23: Version 3.15.5
-
- Fixed JSON.stringify for objects with interceptor handlers.
- (Chromium issue 161028)
-
- Fixed corner case in x64 compare stubs. (issue 2416)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-16: Version 3.15.4
-
- Fixed Array.prototype.join evaluation order. (issue 2263)
-
- Perform CPU sampling by CPU sampling thread only iff processing thread
- is not running. (issue 2364)
-
- When using an Object as a set in Object.getOwnPropertyNames, null out
- the proto. (issue 2410)
-
- Disabled EXTRA_CHECKS in Release build.
-
- Heap explorer: Show representation of strings.
-
- Removed 'type' and 'arguments' properties from Error object.
- (issue 2397)
-
- Added atomics implementation for ThreadSanitizer v2.
- (Chromium issue 128314)
-
- Fixed LiveEdit crashes when object/array literal is added. (issue 2368)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-13: Version 3.15.3
-
- Changed sample shell to send non-JS output (e.g. errors) to stderr
- instead of stdout.
-
- Correctly check for stack overflow even when interrupt is pending.
- (issue 214)
-
- Collect stack trace on stack overflow. (issue 2394)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-12: Version 3.15.2
-
- Function::GetScriptOrigin supplies sourceURL when script name is
- not available. (Chromium issue 159413)
-
- Made formatting error message side-effect-free. (issue 2398)
-
- Fixed length check in JSON.stringify. (Chromium issue 160010)
-
- ES6: Added support for Set and Map clear method (issue 2400)
-
- Fixed slack tracking when instance prototype changes.
- (Chromium issue 157019)
-
- Fixed disabling of code flusher while marking. (Chromium issue 159140)
-
- Added a test case for object grouping in a scavenger GC (issue 2077)
-
- Support shared library build of Android for v8.
- (Chromium issue 158821)
-
- ES6: Added support for size to Set and Map (issue 2395)
-
- Performance and stability improvements on all platforms.
-
-
-2012-11-06: Version 3.15.1
-
- Put incremental code flushing behind a flag. (Chromium issue 159140)
-
- Performance and stability improvements on all platforms.
-
-
-2012-10-31: Version 3.15.0
-
- Loosened aligned code target requirement on ARM (issue 2380)
-
- Fixed JSON.parse to treat leading zeros correctly.
- (Chromium issue 158185)
-
- Performance and stability improvements on all platforms.
-
-
2012-10-22: Version 3.14.5
Killed off the SCons based build.
diff --git a/deps/v8/build/android.gypi b/deps/v8/build/android.gypi
index 67a9d3582..d2d1a3572 100644
--- a/deps/v8/build/android.gypi
+++ b/deps/v8/build/android.gypi
@@ -122,6 +122,8 @@
'ldflags': [
'-nostdlib',
'-Wl,--no-undefined',
+ # Don't export symbols from statically linked libraries.
+ '-Wl,--exclude-libs=ALL',
],
'libraries!': [
'-lrt', # librt is built into Bionic.
@@ -217,13 +219,6 @@
['_type=="shared_library"', {
'ldflags': [
'-Wl,-shared,-Bsymbolic',
- '<(android_lib)/crtbegin_so.o',
- ],
- }],
- ['_type=="static_library"', {
- 'ldflags': [
- # Don't export symbols from statically linked libraries.
- '-Wl,--exclude-libs=ALL',
],
}],
],
diff --git a/deps/v8/build/common.gypi b/deps/v8/build/common.gypi
index 44bebae93..78888b8d7 100644
--- a/deps/v8/build/common.gypi
+++ b/deps/v8/build/common.gypi
@@ -70,6 +70,9 @@
'v8_enable_disassembler%': 0,
+ # Enable extra checks in API functions and other strategic places.
+ 'v8_enable_extra_checks%': 1,
+
'v8_enable_gdbjit%': 0,
'v8_object_print%': 0,
@@ -111,6 +114,9 @@
['v8_enable_disassembler==1', {
'defines': ['ENABLE_DISASSEMBLER',],
}],
+ ['v8_enable_extra_checks==1', {
+ 'defines': ['ENABLE_EXTRA_CHECKS',],
+ }],
['v8_enable_gdbjit==1', {
'defines': ['ENABLE_GDB_JIT_INTERFACE',],
}],
@@ -128,11 +134,6 @@
'V8_TARGET_ARCH_ARM',
],
'conditions': [
- ['armv7==1', {
- 'defines': [
- 'CAN_USE_ARMV7_INSTRUCTIONS=1',
- ],
- }],
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
@@ -143,16 +144,12 @@
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
- # NEON implies VFP3 and VFP3 implies VFP2.
- [ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
- arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
+ [ 'v8_can_use_vfp2_instructions=="true"', {
'defines': [
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
- # NEON implies VFP3.
- [ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
- arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
+ [ 'v8_can_use_vfp3_instructions=="true"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
@@ -160,7 +157,7 @@
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
- 'CAN_USE_VFP2_INSTRUCTIONS',
+ 'CAN_USE_VFP3_INSTRUCTIONS',
],
'target_conditions': [
['_toolset=="target"', {
@@ -203,11 +200,10 @@
['mips_arch_variant=="mips32r2"', {
'cflags': ['-mips32r2', '-Wa,-mips32r2'],
}],
- ['mips_arch_variant=="mips32r1"', {
- 'cflags': ['-mips32', '-Wa,-mips32'],
- }],
['mips_arch_variant=="loongson"', {
'cflags': ['-mips3', '-Wa,-mips3'],
+ }, {
+ 'cflags': ['-mips32', '-Wa,-mips32'],
}],
],
}],
@@ -334,9 +330,6 @@
], # conditions
'configurations': {
'Debug': {
- 'variables': {
- 'v8_enable_extra_checks%': 1,
- },
'defines': [
'DEBUG',
'ENABLE_DISASSEMBLER',
@@ -361,9 +354,6 @@
},
},
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd"', {
'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
'-Wnon-virtual-dtor', '-Woverloaded-virtual' ],
@@ -382,23 +372,21 @@
}],
],
}],
- ['OS=="mac"', {
- 'xcode_settings': {
- 'GCC_OPTIMIZATION_LEVEL': '0', # -O0
- },
- }],
],
}, # Debug
'Release': {
- 'variables': {
- 'v8_enable_extra_checks%': 0,
- },
'conditions': [
- ['v8_enable_extra_checks==1', {
- 'defines': ['ENABLE_EXTRA_CHECKS',],
- }],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
+ 'cflags!': [
+ '-O2',
+ '-Os',
+ ],
+ 'cflags': [
+ '-fdata-sections',
+ '-ffunction-sections',
+ '-O3',
+ ],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [
diff --git a/deps/v8/include/v8-profiler.h b/deps/v8/include/v8-profiler.h
index 4d3597ac7..c1e9a9e0b 100644
--- a/deps/v8/include/v8-profiler.h
+++ b/deps/v8/include/v8-profiler.h
@@ -407,28 +407,13 @@ class V8EXPORT HeapProfiler {
static const SnapshotObjectId kUnknownObjectId = 0;
/**
- * Callback interface for retrieving user friendly names of global objects.
- */
- class ObjectNameResolver {
- public:
- /**
- * Returns name to be used in the heap snapshot for given node. Returned
- * string must stay alive until snapshot collection is completed.
- */
- virtual const char* GetName(Handle<Object> object) = 0;
- protected:
- virtual ~ObjectNameResolver() {}
- };
-
- /**
* Takes a heap snapshot and returns it. Title may be an empty string.
* See HeapSnapshot::Type for types description.
*/
static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
- ActivityControl* control = NULL,
- ObjectNameResolver* global_object_name_resolver = NULL);
+ ActivityControl* control = NULL);
/**
* Starts tracking of heap objects population statistics. After calling
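For reference, a minimal sketch of taking a snapshot against the rolled-back 3.14 signature shown above; the function name and title string are illustrative, and the type/control arguments fall back to their defaults (HeapSnapshot::kFull, NULL):

    #include <v8.h>
    #include <v8-profiler.h>

    const v8::HeapSnapshot* TakeNamedSnapshot() {
      v8::HandleScope scope;
      // 3.14 signature: no ObjectNameResolver argument.
      return v8::HeapProfiler::TakeSnapshot(v8::String::New("rollback"));
    }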
diff --git a/deps/v8/include/v8.h b/deps/v8/include/v8.h
index f577e937a..245dc5a82 100644
--- a/deps/v8/include/v8.h
+++ b/deps/v8/include/v8.h
@@ -76,22 +76,6 @@
#endif // _WIN32
-#if defined(__GNUC__) && !defined(DEBUG)
-#define V8_INLINE(declarator) inline __attribute__((always_inline)) declarator
-#elif defined(_MSC_VER) && !defined(DEBUG)
-#define V8_INLINE(declarator) __forceinline declarator
-#else
-#define V8_INLINE(declarator) inline declarator
-#endif
-
-#if defined(__GNUC__) && !V8_DISABLE_DEPRECATIONS
-#define V8_DEPRECATED(declarator) declarator __attribute__ ((deprecated))
-#elif defined(_MSC_VER) && !V8_DISABLE_DEPRECATIONS
-#define V8_DEPRECATED(declarator) __declspec(deprecated) declarator
-#else
-#define V8_DEPRECATED(declarator) declarator
-#endif
-
/**
* The v8 JavaScript engine.
*/
@@ -192,12 +176,12 @@ template <class T> class Handle {
/**
* Creates an empty handle.
*/
- V8_INLINE(Handle()) : val_(0) {}
+ inline Handle() : val_(0) {}
/**
* Creates a new handle for the specified value.
*/
- V8_INLINE(explicit Handle(T* val)) : val_(val) {}
+ inline explicit Handle(T* val) : val_(val) {}
/**
* Creates a handle for the contents of the specified handle. This
@@ -209,7 +193,7 @@ template <class T> class Handle {
* Handle<String> to a variable declared as Handle<Value>, is legal
* because String is a subclass of Value.
*/
- template <class S> V8_INLINE(Handle(Handle<S> that))
+ template <class S> inline Handle(Handle<S> that)
: val_(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -222,16 +206,16 @@ template <class T> class Handle {
/**
* Returns true if the handle is empty.
*/
- V8_INLINE(bool IsEmpty() const) { return val_ == 0; }
+ inline bool IsEmpty() const { return val_ == 0; }
/**
* Sets the handle to be empty. IsEmpty() will then return true.
*/
- V8_INLINE(void Clear()) { val_ = 0; }
+ inline void Clear() { val_ = 0; }
- V8_INLINE(T* operator->() const) { return val_; }
+ inline T* operator->() const { return val_; }
- V8_INLINE(T* operator*() const) { return val_; }
+ inline T* operator*() const { return val_; }
/**
* Checks whether two handles are the same.
@@ -239,7 +223,7 @@ template <class T> class Handle {
* to which they refer are identical.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE(bool operator==(Handle<S> that) const) {
+ template <class S> inline bool operator==(Handle<S> that) const {
internal::Object** a = reinterpret_cast<internal::Object**>(**this);
internal::Object** b = reinterpret_cast<internal::Object**>(*that);
if (a == 0) return b == 0;
@@ -253,11 +237,11 @@ template <class T> class Handle {
* the objects to which they refer are different.
* The handles' references are not checked.
*/
- template <class S> V8_INLINE(bool operator!=(Handle<S> that) const) {
+ template <class S> inline bool operator!=(Handle<S> that) const {
return !operator==(that);
}
- template <class S> V8_INLINE(static Handle<T> Cast(Handle<S> that)) {
+ template <class S> static inline Handle<T> Cast(Handle<S> that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -266,7 +250,7 @@ template <class T> class Handle {
return Handle<T>(T::Cast(*that));
}
- template <class S> V8_INLINE(Handle<S> As()) {
+ template <class S> inline Handle<S> As() {
return Handle<S>::Cast(*this);
}
@@ -284,8 +268,8 @@ template <class T> class Handle {
*/
template <class T> class Local : public Handle<T> {
public:
- V8_INLINE(Local());
- template <class S> V8_INLINE(Local(Local<S> that))
+ inline Local();
+ template <class S> inline Local(Local<S> that)
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -294,8 +278,8 @@ template <class T> class Local : public Handle<T> {
*/
TYPE_CHECK(T, S);
}
- template <class S> V8_INLINE(Local(S* that) : Handle<T>(that)) { }
- template <class S> V8_INLINE(static Local<T> Cast(Local<S> that)) {
+ template <class S> inline Local(S* that) : Handle<T>(that) { }
+ template <class S> static inline Local<T> Cast(Local<S> that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -304,17 +288,15 @@ template <class T> class Local : public Handle<T> {
return Local<T>(T::Cast(*that));
}
- template <class S> V8_INLINE(Local<S> As()) {
+ template <class S> inline Local<S> As() {
return Local<S>::Cast(*this);
}
- /**
- * Create a local handle for the content of another handle.
- * The referee is kept alive by the local handle even when
- * the original handle is destroyed/disposed.
+ /** Create a local handle for the content of another handle.
+ * The referee is kept alive by the local handle even when
+ * the original handle is destroyed/disposed.
*/
- V8_INLINE(static Local<T> New(Handle<T> that));
- V8_INLINE(static Local<T> New(Isolate* isolate, Handle<T> that));
+ inline static Local<T> New(Handle<T> that);
};
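As a usage note, a sketch of the Handle/Local relationship these hunks keep intact (the string contents are illustrative):

    #include <v8.h>

    void HandleBasics() {
      v8::HandleScope scope;
      v8::Local<v8::String> greeting = v8::String::New("hello");
      v8::Handle<v8::Value> value = greeting;  // implicit upcast, TYPE_CHECKed
      if (!value.IsEmpty() && value->IsString()) {
        // Checked downcast via the Cast/As members shown above.
        v8::Handle<v8::String> back = value.As<v8::String>();
      }
    }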
@@ -341,7 +323,7 @@ template <class T> class Persistent : public Handle<T> {
* Creates an empty persistent handle that doesn't point to any
* storage cell.
*/
- V8_INLINE(Persistent());
+ inline Persistent();
/**
* Creates a persistent handle for the same storage cell as the
@@ -354,7 +336,7 @@ template <class T> class Persistent : public Handle<T> {
* Persistent<String> to a variable declared as Persistent<Value>,
* is allowed as String is a subclass of Value.
*/
- template <class S> V8_INLINE(Persistent(Persistent<S> that))
+ template <class S> inline Persistent(Persistent<S> that)
: Handle<T>(reinterpret_cast<T*>(*that)) {
/**
* This check fails when trying to convert between incompatible
@@ -364,16 +346,16 @@ template <class T> class Persistent : public Handle<T> {
TYPE_CHECK(T, S);
}
- template <class S> V8_INLINE(Persistent(S* that)) : Handle<T>(that) { }
+ template <class S> inline Persistent(S* that) : Handle<T>(that) { }
/**
* "Casts" a plain handle which is known to be a persistent handle
* to a persistent handle.
*/
- template <class S> explicit V8_INLINE(Persistent(Handle<S> that))
+ template <class S> explicit inline Persistent(Handle<S> that)
: Handle<T>(*that) { }
- template <class S> V8_INLINE(static Persistent<T> Cast(Persistent<S> that)) {
+ template <class S> static inline Persistent<T> Cast(Persistent<S> that) {
#ifdef V8_ENABLE_CHECKS
// If we're going to perform the type check then we have to check
// that the handle isn't empty before doing the checked cast.
@@ -382,7 +364,7 @@ template <class T> class Persistent : public Handle<T> {
return Persistent<T>(T::Cast(*that));
}
- template <class S> V8_INLINE(Persistent<S> As()) {
+ template <class S> inline Persistent<S> As() {
return Persistent<S>::Cast(*this);
}
@@ -390,7 +372,7 @@ template <class T> class Persistent : public Handle<T> {
* Creates a new persistent handle for an existing local or
* persistent handle.
*/
- V8_INLINE(static Persistent<T> New(Handle<T> that));
+ inline static Persistent<T> New(Handle<T> that);
/**
* Releases the storage cell referenced by this persistent handle.
@@ -398,8 +380,7 @@ template <class T> class Persistent : public Handle<T> {
* This handle's reference, and any other references to the storage
* cell remain and IsEmpty will still return false.
*/
- V8_INLINE(void Dispose());
- V8_INLINE(void Dispose(Isolate* isolate));
+ inline void Dispose();
/**
* Make the reference to this object weak. When only weak handles
@@ -407,13 +388,10 @@ template <class T> class Persistent : public Handle<T> {
* callback to the given V8::WeakReferenceCallback function, passing
* it the object reference and the given parameters.
*/
- V8_INLINE(void MakeWeak(void* parameters, WeakReferenceCallback callback));
- V8_INLINE(void MakeWeak(Isolate* isolate,
- void* parameters,
- WeakReferenceCallback callback));
+ inline void MakeWeak(void* parameters, WeakReferenceCallback callback);
/** Clears the weak reference to this object. */
- V8_INLINE(void ClearWeak());
+ inline void ClearWeak();
/**
* Marks the reference to this object independent. Garbage collector
@@ -422,42 +400,28 @@ template <class T> class Persistent : public Handle<T> {
* assume that it will be preceded by a global GC prologue callback
* or followed by a global GC epilogue callback.
*/
- V8_INLINE(void MarkIndependent());
- V8_INLINE(void MarkIndependent(Isolate* isolate));
-
- /**
- * Marks the reference to this object partially dependent. Partially
- * dependent handles only depend on other partially dependent handles and
- * these dependencies are provided through object groups. It provides a way
- * to build smaller object groups for young objects that represent only a
- * subset of all external dependencies. This mark is automatically cleared
- * after each garbage collection.
- */
- V8_INLINE(void MarkPartiallyDependent());
- V8_INLINE(void MarkPartiallyDependent(Isolate* isolate));
+ inline void MarkIndependent();
/** Returns true if this handle was previously marked as independent. */
- V8_INLINE(bool IsIndependent() const);
- V8_INLINE(bool IsIndependent(Isolate* isolate) const);
+ inline bool IsIndependent() const;
/** Checks if the handle holds the only reference to an object. */
- V8_INLINE(bool IsNearDeath() const);
+ inline bool IsNearDeath() const;
/** Returns true if the handle's reference is weak. */
- V8_INLINE(bool IsWeak() const);
- V8_INLINE(bool IsWeak(Isolate* isolate) const);
+ inline bool IsWeak() const;
/**
* Assigns a wrapper class ID to the handle. See RetainedObjectInfo
* interface description in v8-profiler.h for details.
*/
- V8_INLINE(void SetWrapperClassId(uint16_t class_id));
+ inline void SetWrapperClassId(uint16_t class_id);
/**
* Returns the class ID previously assigned to this handle or 0 if no class
* ID was previously assigned.
*/
- V8_INLINE(uint16_t WrapperClassId() const);
+ inline uint16_t WrapperClassId() const;
private:
friend class ImplementationUtilities;
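The Persistent API this rollback settles on is the single-slot, Isolate-free variant. A sketch of the create/weaken/dispose cycle; the WeakReferenceCallback signature is the pre-3.15 form (an assumption, not shown in this hunk), and all names are illustrative:

    #include <v8.h>
    #include <cstddef>

    static void OnWeak(v8::Persistent<v8::Value> handle, void* parameter) {
      // In this style the callback performs the final Dispose() itself.
      handle.Dispose();
    }

    v8::Persistent<v8::Object> Retain(v8::Handle<v8::Object> obj) {
      v8::Persistent<v8::Object> holder = v8::Persistent<v8::Object>::New(obj);
      holder.MakeWeak(NULL, OnWeak);  // no Isolate* overload in 3.14
      return holder;
    }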
@@ -500,14 +464,12 @@ class V8EXPORT HandleScope {
* Creates a new handle with the given value.
*/
static internal::Object** CreateHandle(internal::Object* value);
- static internal::Object** CreateHandle(internal::Isolate* isolate,
- internal::Object* value);
// Faster version, uses HeapObject to obtain the current Isolate.
static internal::Object** CreateHandle(internal::HeapObject* value);
private:
- // Make it hard to create heap-allocated or illegal handle scopes by
- // disallowing certain operations.
+ // Make it impossible to create heap-allocated or illegal handle
+ // scopes by disallowing certain operations.
HandleScope(const HandleScope&);
void operator=(const HandleScope&);
void* operator new(size_t size);
@@ -520,7 +482,7 @@ class V8EXPORT HandleScope {
internal::Object** next;
internal::Object** limit;
int level;
- V8_INLINE(void Initialize()) {
+ inline void Initialize() {
next = limit = NULL;
level = 0;
}
@@ -613,16 +575,16 @@ class V8EXPORT ScriptData { // NOLINT
*/
class ScriptOrigin {
public:
- V8_INLINE(ScriptOrigin(
+ inline ScriptOrigin(
Handle<Value> resource_name,
Handle<Integer> resource_line_offset = Handle<Integer>(),
- Handle<Integer> resource_column_offset = Handle<Integer>()))
+ Handle<Integer> resource_column_offset = Handle<Integer>())
: resource_name_(resource_name),
resource_line_offset_(resource_line_offset),
resource_column_offset_(resource_column_offset) { }
- V8_INLINE(Handle<Value> ResourceName() const);
- V8_INLINE(Handle<Integer> ResourceLineOffset() const);
- V8_INLINE(Handle<Integer> ResourceColumnOffset() const);
+ inline Handle<Value> ResourceName() const;
+ inline Handle<Integer> ResourceLineOffset() const;
+ inline Handle<Integer> ResourceColumnOffset() const;
private:
Handle<Value> resource_name_;
Handle<Integer> resource_line_offset_;
@@ -910,13 +872,13 @@ class Value : public Data {
* Returns true if this value is the undefined value. See ECMA-262
* 4.3.10.
*/
- V8_INLINE(bool IsUndefined() const);
+ inline bool IsUndefined() const;
/**
* Returns true if this value is the null value. See ECMA-262
* 4.3.11.
*/
- V8_INLINE(bool IsNull() const);
+ inline bool IsNull() const;
/**
* Returns true if this value is true.
@@ -932,7 +894,7 @@ class Value : public Data {
* Returns true if this value is an instance of the String type.
* See ECMA-262 8.4.
*/
- V8_INLINE(bool IsString() const);
+ inline bool IsString() const;
/**
* Returns true if this value is a function.
@@ -1030,9 +992,9 @@ class Value : public Data {
V8EXPORT bool StrictEquals(Handle<Value> that) const;
private:
- V8_INLINE(bool QuickIsUndefined() const);
- V8_INLINE(bool QuickIsNull() const);
- V8_INLINE(bool QuickIsString() const);
+ inline bool QuickIsUndefined() const;
+ inline bool QuickIsNull() const;
+ inline bool QuickIsString() const;
V8EXPORT bool FullIsUndefined() const;
V8EXPORT bool FullIsNull() const;
V8EXPORT bool FullIsString() const;
@@ -1052,7 +1014,7 @@ class Primitive : public Value { };
class Boolean : public Primitive {
public:
V8EXPORT bool Value() const;
- V8_INLINE(static Handle<Boolean> New(bool value));
+ static inline Handle<Boolean> New(bool value);
};
@@ -1137,7 +1099,7 @@ class String : public Primitive {
* A zero length string.
*/
V8EXPORT static v8::Local<v8::String> Empty();
- V8_INLINE(static v8::Local<v8::String> Empty(Isolate* isolate));
+ inline static v8::Local<v8::String> Empty(Isolate* isolate);
/**
* Returns true if the string is external
@@ -1233,14 +1195,14 @@ class String : public Primitive {
* regardless of the encoding, otherwise return NULL. The encoding of the
* string is returned in encoding_out.
*/
- V8_INLINE(ExternalStringResourceBase* GetExternalStringResourceBase(
- Encoding* encoding_out) const);
+ inline ExternalStringResourceBase* GetExternalStringResourceBase(
+ Encoding* encoding_out) const;
/**
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
- V8_INLINE(ExternalStringResource* GetExternalStringResource() const);
+ inline ExternalStringResource* GetExternalStringResource() const;
/**
* Get the ExternalAsciiStringResource for an external ASCII string.
@@ -1249,7 +1211,7 @@ class String : public Primitive {
V8EXPORT const ExternalAsciiStringResource* GetExternalAsciiStringResource()
const;
- V8_INLINE(static String* Cast(v8::Value* obj));
+ static inline String* Cast(v8::Value* obj);
/**
* Allocates a new string from either UTF-8 encoded or ASCII data.
@@ -1413,7 +1375,7 @@ class Number : public Primitive {
public:
V8EXPORT double Value() const;
V8EXPORT static Local<Number> New(double value);
- V8_INLINE(static Number* Cast(v8::Value* obj));
+ static inline Number* Cast(v8::Value* obj);
private:
V8EXPORT Number();
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -1430,7 +1392,7 @@ class Integer : public Number {
V8EXPORT static Local<Integer> New(int32_t value, Isolate*);
V8EXPORT static Local<Integer> NewFromUnsigned(uint32_t value, Isolate*);
V8EXPORT int64_t Value() const;
- V8_INLINE(static Integer* Cast(v8::Value* obj));
+ static inline Integer* Cast(v8::Value* obj);
private:
V8EXPORT Integer();
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -1625,42 +1587,16 @@ class Object : public Value {
/** Gets the number of internal fields for this Object. */
V8EXPORT int InternalFieldCount();
-
- /** Gets the value from an internal field. */
- V8_INLINE(Local<Value> GetInternalField(int index));
-
+ /** Gets the value in an internal field. */
+ inline Local<Value> GetInternalField(int index);
/** Sets the value in an internal field. */
V8EXPORT void SetInternalField(int index, Handle<Value> value);
- /**
- * Gets a native pointer from an internal field. Deprecated. If the pointer is
- * always 2-byte-aligned, use GetAlignedPointerFromInternalField instead,
- * otherwise use a combination of GetInternalField, External::Cast and
- * External::Value.
- */
- V8EXPORT V8_DEPRECATED(void* GetPointerFromInternalField(int index));
-
- /**
- * Sets a native pointer in an internal field. Deprecated. If the pointer is
- * always 2-byte aligned, use SetAlignedPointerInInternalField instead,
- * otherwise use a combination of External::New and SetInternalField.
- */
- V8_DEPRECATED(V8_INLINE(void SetPointerInInternalField(int index,
- void* value)));
-
- /**
- * Gets a 2-byte-aligned native pointer from an internal field. This field
- * must have been set by SetAlignedPointerInInternalField, everything else
- * leads to undefined behavior.
- */
- V8_INLINE(void* GetAlignedPointerFromInternalField(int index));
+ /** Gets a native pointer from an internal field. */
+ inline void* GetPointerFromInternalField(int index);
- /**
- * Sets a 2-byte-aligned native pointer in an internal field. To retrieve such
- * a field, GetAlignedPointerFromInternalField must be used, everything else
- * leads to undefined behavior.
- */
- V8EXPORT void SetAlignedPointerInInternalField(int index, void* value);
+ /** Sets a native pointer in an internal field. */
+ V8EXPORT void SetPointerInInternalField(int index, void* value);
// Testers for local properties.
V8EXPORT bool HasOwnProperty(Handle<String> key);
@@ -1786,13 +1722,19 @@ class Object : public Value {
Handle<Value> argv[]);
V8EXPORT static Local<Object> New();
- V8_INLINE(static Object* Cast(Value* obj));
+ static inline Object* Cast(Value* obj);
private:
V8EXPORT Object();
V8EXPORT static void CheckCast(Value* obj);
- V8EXPORT Local<Value> SlowGetInternalField(int index);
- V8EXPORT void* SlowGetAlignedPointerFromInternalField(int index);
+ V8EXPORT Local<Value> CheckedGetInternalField(int index);
+ V8EXPORT void* SlowGetPointerFromInternalField(int index);
+
+ /**
+ * If quick access to the internal field is possible this method
+ * returns the value. Otherwise an empty handle is returned.
+ */
+ inline Local<Value> UncheckedGetInternalField(int index);
};
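These hunks restore the plain internal-field pointer accessors. A sketch, assuming the object's template declared at least one internal field (via ObjectTemplate::SetInternalFieldCount); the Backing type is illustrative:

    #include <v8.h>

    struct Backing { int refcount; };

    void Attach(v8::Handle<v8::Object> wrapper, Backing* native) {
      wrapper->SetPointerInInternalField(0, native);
    }

    Backing* Lookup(v8::Handle<v8::Object> wrapper) {
      return static_cast<Backing*>(wrapper->GetPointerFromInternalField(0));
    }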
@@ -1815,7 +1757,7 @@ class Array : public Object {
*/
V8EXPORT static Local<Array> New(int length = 0);
- V8_INLINE(static Array* Cast(Value* obj));
+ static inline Array* Cast(Value* obj);
private:
V8EXPORT Array();
V8EXPORT static void CheckCast(Value* obj);
@@ -1855,7 +1797,7 @@ class Function : public Object {
V8EXPORT int GetScriptColumnNumber() const;
V8EXPORT Handle<Value> GetScriptId() const;
V8EXPORT ScriptOrigin GetScriptOrigin() const;
- V8_INLINE(static Function* Cast(Value* obj));
+ static inline Function* Cast(Value* obj);
V8EXPORT static const int kLineOffsetNotFound;
private:
@@ -1877,7 +1819,7 @@ class Date : public Object {
*/
V8EXPORT double NumberValue() const;
- V8_INLINE(static Date* Cast(v8::Value* obj));
+ static inline Date* Cast(v8::Value* obj);
/**
* Notification that the embedder has changed the time zone,
@@ -1910,7 +1852,7 @@ class NumberObject : public Object {
*/
V8EXPORT double NumberValue() const;
- V8_INLINE(static NumberObject* Cast(v8::Value* obj));
+ static inline NumberObject* Cast(v8::Value* obj);
private:
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -1929,7 +1871,7 @@ class BooleanObject : public Object {
*/
V8EXPORT bool BooleanValue() const;
- V8_INLINE(static BooleanObject* Cast(v8::Value* obj));
+ static inline BooleanObject* Cast(v8::Value* obj);
private:
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -1948,7 +1890,7 @@ class StringObject : public Object {
*/
V8EXPORT Local<String> StringValue() const;
- V8_INLINE(static StringObject* Cast(v8::Value* obj));
+ static inline StringObject* Cast(v8::Value* obj);
private:
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -1995,7 +1937,7 @@ class RegExp : public Object {
*/
V8EXPORT Flags GetFlags() const;
- V8_INLINE(static RegExp* Cast(v8::Value* obj));
+ static inline RegExp* Cast(v8::Value* obj);
private:
V8EXPORT static void CheckCast(v8::Value* obj);
@@ -2003,22 +1945,29 @@ class RegExp : public Object {
/**
- * A JavaScript value that wraps a C++ void*. This type of value is mainly used
- * to associate C++ data structures with JavaScript objects.
+ * A JavaScript value that wraps a C++ void*. This type of value is
+ * mainly used to associate C++ data structures with JavaScript
+ * objects.
+ *
+ * The Wrap function V8 will return the most optimal Value object wrapping the
+ * C++ void*. The type of the value is not guaranteed to be an External object
+ * and no assumptions about its type should be made. To access the wrapped
+ * value Unwrap should be used, all other operations on that object will lead
+ * to unpredictable results.
*/
class External : public Value {
public:
- /** Deprecated, use New instead. */
- V8_DEPRECATED(V8_INLINE(static Local<Value> Wrap(void* value)));
-
- /** Deprecated, use a combination of Cast and Value instead. */
- V8_DEPRECATED(V8_INLINE(static void* Unwrap(Handle<Value> obj)));
+ V8EXPORT static Local<Value> Wrap(void* data);
+ static inline void* Unwrap(Handle<Value> obj);
V8EXPORT static Local<External> New(void* value);
- V8_INLINE(static External* Cast(Value* obj));
+ static inline External* Cast(Value* obj);
V8EXPORT void* Value() const;
private:
+ V8EXPORT External();
V8EXPORT static void CheckCast(v8::Value* obj);
+ static inline void* QuickUnwrap(Handle<v8::Value> obj);
+ V8EXPORT static void* FullUnwrap(Handle<v8::Value> obj);
};
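A round-trip sketch of the restored Wrap/Unwrap pair; as the class comment above warns, Unwrap is only defined for values produced by Wrap:

    #include <v8.h>

    void* RoundTrip(void* data) {
      v8::HandleScope scope;
      v8::Local<v8::Value> wrapped = v8::External::Wrap(data);
      return v8::External::Unwrap(wrapped);
    }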
@@ -2033,7 +1982,7 @@ class V8EXPORT Template : public Data {
/** Adds a property to each instance created by this template.*/
void Set(Handle<String> name, Handle<Data> value,
PropertyAttribute attributes = None);
- V8_INLINE(void Set(const char* name, Handle<Data> value));
+ inline void Set(const char* name, Handle<Data> value);
private:
Template();
@@ -2050,14 +1999,14 @@ class V8EXPORT Template : public Data {
*/
class Arguments {
public:
- V8_INLINE(int Length() const);
- V8_INLINE(Local<Value> operator[](int i) const);
- V8_INLINE(Local<Function> Callee() const);
- V8_INLINE(Local<Object> This() const);
- V8_INLINE(Local<Object> Holder() const);
- V8_INLINE(bool IsConstructCall() const);
- V8_INLINE(Local<Value> Data() const);
- V8_INLINE(Isolate* GetIsolate() const);
+ inline int Length() const;
+ inline Local<Value> operator[](int i) const;
+ inline Local<Function> Callee() const;
+ inline Local<Object> This() const;
+ inline Local<Object> Holder() const;
+ inline bool IsConstructCall() const;
+ inline Local<Value> Data() const;
+ inline Isolate* GetIsolate() const;
private:
static const int kIsolateIndex = 0;
@@ -2066,10 +2015,10 @@ class Arguments {
static const int kHolderIndex = -3;
friend class ImplementationUtilities;
- V8_INLINE(Arguments(internal::Object** implicit_args,
+ inline Arguments(internal::Object** implicit_args,
internal::Object** values,
int length,
- bool is_construct_call));
+ bool is_construct_call);
internal::Object** implicit_args_;
internal::Object** values_;
int length_;
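A sketch of a 3.14-style invocation callback consuming this Arguments interface; registration via v8::FunctionTemplate::New(Add) is assumed, and the callback returns its result directly:

    #include <v8.h>

    static v8::Handle<v8::Value> Add(const v8::Arguments& args) {
      if (args.Length() < 2) return v8::Undefined();
      double sum = args[0]->NumberValue() + args[1]->NumberValue();
      return v8::Number::New(sum);
    }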
@@ -2083,12 +2032,12 @@ class Arguments {
*/
class V8EXPORT AccessorInfo {
public:
- V8_INLINE(AccessorInfo(internal::Object** args))
+ inline AccessorInfo(internal::Object** args)
: args_(args) { }
- V8_INLINE(Isolate* GetIsolate() const);
- V8_INLINE(Local<Value> Data() const);
- V8_INLINE(Local<Object> This() const);
- V8_INLINE(Local<Object> Holder() const);
+ inline Isolate* GetIsolate() const;
+ inline Local<Value> Data() const;
+ inline Local<Object> This() const;
+ inline Local<Object> Holder() const;
private:
internal::Object** args_;
@@ -2653,7 +2602,7 @@ void V8EXPORT RegisterExtension(Extension* extension);
*/
class V8EXPORT DeclareExtension {
public:
- V8_INLINE(DeclareExtension(Extension* extension)) {
+ inline DeclareExtension(Extension* extension) {
RegisterExtension(extension);
}
};
@@ -2667,10 +2616,10 @@ Handle<Primitive> V8EXPORT Null();
Handle<Boolean> V8EXPORT True();
Handle<Boolean> V8EXPORT False();
-V8_INLINE(Handle<Primitive> Undefined(Isolate* isolate));
-V8_INLINE(Handle<Primitive> Null(Isolate* isolate));
-V8_INLINE(Handle<Boolean> True(Isolate* isolate));
-V8_INLINE(Handle<Boolean> False(Isolate* isolate));
+inline Handle<Primitive> Undefined(Isolate* isolate);
+inline Handle<Primitive> Null(Isolate* isolate);
+inline Handle<Boolean> True(Isolate* isolate);
+inline Handle<Boolean> False(Isolate* isolate);
/**
@@ -2824,7 +2773,6 @@ class V8EXPORT HeapStatistics {
HeapStatistics();
size_t total_heap_size() { return total_heap_size_; }
size_t total_heap_size_executable() { return total_heap_size_executable_; }
- size_t total_physical_size() { return total_physical_size_; }
size_t used_heap_size() { return used_heap_size_; }
size_t heap_size_limit() { return heap_size_limit_; }
@@ -2833,15 +2781,11 @@ class V8EXPORT HeapStatistics {
void set_total_heap_size_executable(size_t size) {
total_heap_size_executable_ = size;
}
- void set_total_physical_size(size_t size) {
- total_physical_size_ = size;
- }
void set_used_heap_size(size_t size) { used_heap_size_ = size; }
void set_heap_size_limit(size_t size) { heap_size_limit_ = size; }
size_t total_heap_size_;
size_t total_heap_size_executable_;
- size_t total_physical_size_;
size_t used_heap_size_;
size_t heap_size_limit_;
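With total_physical_size() gone, the remaining accessors are queried as before. A sketch (the logging format is illustrative):

    #include <v8.h>
    #include <cstdio>

    void LogHeap() {
      v8::HeapStatistics stats;
      v8::V8::GetHeapStatistics(&stats);
      std::printf("heap: %lu used of %lu bytes\n",
                  static_cast<unsigned long>(stats.used_heap_size()),
                  static_cast<unsigned long>(stats.total_heap_size()));
    }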
@@ -2927,13 +2871,13 @@ class V8EXPORT Isolate {
/**
* Associate embedder-specific data with the isolate
*/
- V8_INLINE(void SetData(void* data));
+ inline void SetData(void* data);
/**
* Retrieve embedder-specific data from the isolate.
* Returns NULL if SetData has never been called.
*/
- V8_INLINE(void* GetData());
+ inline void* GetData();
private:
Isolate();
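A sketch of the single embedder-data slot this version keeps (the PerIsolate type is illustrative):

    #include <v8.h>

    struct PerIsolate { int request_count; };

    void Bind(v8::Isolate* isolate, PerIsolate* state) {
      isolate->SetData(state);  // one untyped slot; no indexed variant in 3.14
    }

    PerIsolate* Get(v8::Isolate* isolate) {
      return static_cast<PerIsolate*>(isolate->GetData());
    }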
@@ -3205,6 +3149,12 @@ class V8EXPORT V8 {
static void SetCreateHistogramFunction(CreateHistogramCallback);
static void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
+ /**
+ * Enables the computation of a sliding window of states. The sliding
+ * window information is recorded in statistics counters.
+ */
+ static void EnableSlidingStateWindow();
+
/** Callback function for reporting failed access checks.*/
static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
@@ -3299,19 +3249,12 @@ class V8EXPORT V8 {
* After each garbage collection, object groups are removed. It is
* intended to be used in the before-garbage-collection callback
* function, for instance to simulate DOM tree connections among JS
- * wrapper objects. Object groups for all dependent handles need to
- * be provided for kGCTypeMarkSweepCompact collections, for all other
- * garbage collection types it is sufficient to provide object groups
- * for partially dependent handles only.
+ * wrapper objects.
* See v8-profiler.h for RetainedObjectInfo interface description.
*/
static void AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info = NULL);
- static void AddObjectGroup(Isolate* isolate,
- Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info = NULL);
/**
* Allows the host application to declare implicit references between
@@ -3496,8 +3439,8 @@ class V8EXPORT V8 {
/**
* Iterates through all external resources referenced from current isolate
- * heap. GC is not invoked prior to iterating, therefore there is no
- * guarantee that visited objects are still alive.
+ * heap. This method is not expected to be used except for debugging purposes
+ * and may be quite slow.
*/
static void VisitExternalResources(ExternalResourceVisitor* visitor);
@@ -3540,29 +3483,14 @@ class V8EXPORT V8 {
static internal::Object** GlobalizeReference(internal::Object** handle);
static void DisposeGlobal(internal::Object** global_handle);
- static void DisposeGlobal(internal::Isolate* isolate,
- internal::Object** global_handle);
static void MakeWeak(internal::Object** global_handle,
void* data,
WeakReferenceCallback);
- static void MakeWeak(internal::Isolate* isolate,
- internal::Object** global_handle,
- void* data,
- WeakReferenceCallback);
static void ClearWeak(internal::Object** global_handle);
static void MarkIndependent(internal::Object** global_handle);
- static void MarkIndependent(internal::Isolate* isolate,
- internal::Object** global_handle);
- static void MarkPartiallyDependent(internal::Object** global_handle);
- static void MarkPartiallyDependent(internal::Isolate* isolate,
- internal::Object** global_handle);
static bool IsGlobalIndependent(internal::Object** global_handle);
- static bool IsGlobalIndependent(internal::Isolate* isolate,
- internal::Object** global_handle);
static bool IsGlobalNearDeath(internal::Object** global_handle);
static bool IsGlobalWeak(internal::Object** global_handle);
- static bool IsGlobalWeak(internal::Isolate* isolate,
- internal::Object** global_handle);
static void SetWrapperClassId(internal::Object** global_handle,
uint16_t class_id);
static uint16_t GetWrapperClassId(internal::Object** global_handle);
@@ -3580,9 +3508,7 @@ class V8EXPORT V8 {
class V8EXPORT TryCatch {
public:
/**
- * Creates a new try/catch block and registers it with v8. Note that
- * all TryCatch blocks should be stack allocated because the memory
- * location itself is compared against JavaScript try/catch blocks.
+ * Creates a new try/catch block and registers it with v8.
*/
TryCatch();
@@ -3672,12 +3598,6 @@ class V8EXPORT TryCatch {
void SetCaptureMessage(bool value);
private:
- // Make it hard to create heap-allocated TryCatch blocks.
- TryCatch(const TryCatch&);
- void operator=(const TryCatch&);
- void* operator new(size_t size);
- void operator delete(void*, size_t);
-
v8::internal::Isolate* isolate_;
void* next_;
void* exception_;
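Even with the operator new guards removed above, TryCatch is still meant to live on the stack, since its address is compared against JavaScript try/catch blocks. A sketch of the usual pattern (script creation and error reporting are assumed to exist elsewhere):

    #include <v8.h>

    void RunGuarded(v8::Handle<v8::Script> script) {
      v8::TryCatch try_catch;  // stack-allocated, scoped to this call
      v8::Handle<v8::Value> result = script->Run();
      if (result.IsEmpty() && try_catch.HasCaught()) {
        v8::String::Utf8Value error(try_catch.Exception());
        // *error is a NUL-terminated C string (or NULL if conversion failed).
      }
    }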
@@ -3819,45 +3739,12 @@ class V8EXPORT Context {
static bool InContext();
/**
- * Gets embedder data with index 0. Deprecated, use GetEmbedderData with index
- * 0 instead.
+ * Associate an additional data object with the context. This is mainly used
+ * with the debugger to provide additional information on the context through
+ * the debugger API.
*/
- V8_DEPRECATED(V8_INLINE(Local<Value> GetData()));
-
- /**
- * Sets embedder data with index 0. Deprecated, use SetEmbedderData with index
- * 0 instead.
- */
- V8_DEPRECATED(V8_INLINE(void SetData(Handle<Value> value)));
-
- /**
- * Gets the embedder data with the given index, which must have been set by a
- * previous call to SetEmbedderData with the same index. Note that index 0
- * currently has a special meaning for Chrome's debugger.
- */
- V8_INLINE(Local<Value> GetEmbedderData(int index));
-
- /**
- * Sets the embedder data with the given index, growing the data as
- * needed. Note that index 0 currently has a special meaning for Chrome's
- * debugger.
- */
- void SetEmbedderData(int index, Handle<Value> value);
-
- /**
- * Gets a 2-byte-aligned native pointer from the embedder data with the given
- * index, which must have been set by a previous call to
- * SetAlignedPointerInEmbedderData with the same index. Note that index 0
- * currently has a special meaning for Chrome's debugger.
- */
- V8_INLINE(void* GetAlignedPointerFromEmbedderData(int index));
-
- /**
- * Sets a 2-byte-aligned native pointer in the embedder data with the given
- * index, growing the data as needed. Note that index 0 currently has a
- * special meaning for Chrome's debugger.
- */
- void SetAlignedPointerInEmbedderData(int index, void* value);
+ void SetData(Handle<Value> data);
+ Local<Value> GetData();
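
A sketch of the restored single-slot data API, assuming an entered context (the tag value is illustrative):

    v8::Persistent<v8::Context> context = v8::Context::New();
    v8::Context::Scope scope(context);
    context->SetData(v8::String::New("embedder-tag"));
    v8::Local<v8::Value> tag = context->GetData();  // same value back
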
/**
* Control whether code generation from strings is allowed. Calling
@@ -3893,10 +3780,10 @@ class V8EXPORT Context {
*/
class Scope {
public:
- explicit V8_INLINE(Scope(Handle<Context> context)) : context_(context) {
+ explicit inline Scope(Handle<Context> context) : context_(context) {
context_->Enter();
}
- V8_INLINE(~Scope()) { context_->Exit(); }
+ inline ~Scope() { context_->Exit(); }
private:
Handle<Context> context_;
};
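
Scope is unchanged in behavior; only the V8_INLINE wrapper reverts to plain inline. Usage for completeness:

    {
      v8::Context::Scope context_scope(context);  // context->Enter()
      // ... evaluate scripts against `context` ...
    }                                             // context->Exit() on scope end
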
@@ -3906,9 +3793,6 @@ class V8EXPORT Context {
friend class Script;
friend class Object;
friend class Function;
-
- Local<Value> SlowGetEmbedderData(int index);
- void* SlowGetAlignedPointerFromEmbedderData(int index);
};
@@ -4137,27 +4021,47 @@ template <size_t ptr_size> struct SmiTagging;
template <> struct SmiTagging<4> {
static const int kSmiShiftSize = 0;
static const int kSmiValueSize = 31;
- V8_INLINE(static int SmiToInt(internal::Object* value)) {
+ static inline int SmiToInt(internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Throw away top 32 bits and shift down (requires >> to be sign extending).
return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
}
+
+ // For 32-bit systems any 2-byte-aligned pointer can be encoded as a smi
+ // with a plain reinterpret_cast.
+ static const uintptr_t kEncodablePointerMask = 0x1;
+ static const int kPointerToSmiShift = 0;
};
// Smi constants for 64-bit systems.
template <> struct SmiTagging<8> {
static const int kSmiShiftSize = 31;
static const int kSmiValueSize = 32;
- V8_INLINE(static int SmiToInt(internal::Object* value)) {
+ static inline int SmiToInt(internal::Object* value) {
int shift_bits = kSmiTagSize + kSmiShiftSize;
// Shift down and throw away top 32 bits.
return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
}
+
+ // To maximize the range of pointers that can be encoded
+ // in the available 32 bits, we require them to be 8-byte aligned.
+ // This gives 2 ^ (32 + 3) = 32G of address space covered.
+ // It might not be enough to cover stack-allocated objects on some platforms.
+ static const int kPointerAlignment = 3;
+
+ static const uintptr_t kEncodablePointerMask =
+ ~(uintptr_t(0xffffffff) << kPointerAlignment);
+
+ static const int kPointerToSmiShift =
+ kSmiTagSize + kSmiShiftSize - kPointerAlignment;
};
typedef SmiTagging<kApiPointerSize> PlatformSmiTagging;
const int kSmiShiftSize = PlatformSmiTagging::kSmiShiftSize;
const int kSmiValueSize = PlatformSmiTagging::kSmiValueSize;
+const uintptr_t kEncodablePointerMask =
+ PlatformSmiTagging::kEncodablePointerMask;
+const int kPointerToSmiShift = PlatformSmiTagging::kPointerToSmiShift;
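
Concretely: kSmiTagSize is 1, so on 32-bit targets the shift is 0 and any pointer with a clear low bit is already a valid smi, while on 64-bit targets the shift is 1 + 31 - 3 = 29 and only 8-byte-aligned pointers below 2^35 qualify. A round-trip sketch using the constants defined above (helper names are illustrative):

    bool CanEncode(void* ptr) {
      uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
      return (address & v8::internal::kEncodablePointerMask) == 0;
    }

    void* RoundTrip(void* ptr) {  // encode as smi payload, then decode
      uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
      uintptr_t encoded = address << v8::internal::kPointerToSmiShift;
      return reinterpret_cast<void*>(encoded >> v8::internal::kPointerToSmiShift);
    }
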
/**
* This class exports constants and functionality from within v8 that
@@ -4175,9 +4079,6 @@ class Internals {
static const int kOddballKindOffset = 3 * kApiPointerSize;
static const int kForeignAddressOffset = kApiPointerSize;
static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
- static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
- static const int kContextHeaderSize = 2 * kApiPointerSize;
- static const int kContextEmbedderDataIndex = 54;
static const int kFullStringRepresentationMask = 0x07;
static const int kStringEncodingMask = 0x4;
static const int kExternalTwoByteRepresentationTag = 0x02;
@@ -4190,7 +4091,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
- static const int kEmptySymbolRootIndex = 119;
+ static const int kEmptySymbolRootIndex = 117;
static const int kJSObjectType = 0xaa;
static const int kFirstNonstringType = 0x80;
@@ -4200,80 +4101,85 @@ class Internals {
static const int kUndefinedOddballKind = 5;
static const int kNullOddballKind = 3;
- V8_INLINE(static bool HasHeapObjectTag(internal::Object* value)) {
+ static inline bool HasHeapObjectTag(internal::Object* value) {
return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
kHeapObjectTag);
}
- V8_INLINE(static int SmiValue(internal::Object* value)) {
+ static inline bool HasSmiTag(internal::Object* value) {
+ return ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag);
+ }
+
+ static inline int SmiValue(internal::Object* value) {
return PlatformSmiTagging::SmiToInt(value);
}
- V8_INLINE(static int GetInstanceType(internal::Object* obj)) {
+ static inline int GetInstanceType(internal::Object* obj) {
typedef internal::Object O;
O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
}
- V8_INLINE(static int GetOddballKind(internal::Object* obj)) {
+ static inline int GetOddballKind(internal::Object* obj) {
typedef internal::Object O;
return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
}
- V8_INLINE(static bool IsExternalTwoByteString(int instance_type)) {
+ static inline void* GetExternalPointerFromSmi(internal::Object* value) {
+ const uintptr_t address = reinterpret_cast<uintptr_t>(value);
+ return reinterpret_cast<void*>(address >> kPointerToSmiShift);
+ }
+
+ static inline void* GetExternalPointer(internal::Object* obj) {
+ if (HasSmiTag(obj)) {
+ return GetExternalPointerFromSmi(obj);
+ } else if (GetInstanceType(obj) == kForeignType) {
+ return ReadField<void*>(obj, kForeignAddressOffset);
+ } else {
+ return NULL;
+ }
+ }
+
+ static inline bool IsExternalTwoByteString(int instance_type) {
int representation = (instance_type & kFullStringRepresentationMask);
return representation == kExternalTwoByteRepresentationTag;
}
- V8_INLINE(static bool IsInitialized(v8::Isolate* isolate)) {
+ static inline bool IsInitialized(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateStateOffset;
return *reinterpret_cast<int*>(addr) == 1;
}
- V8_INLINE(static void SetEmbedderData(v8::Isolate* isolate, void* data)) {
+ static inline void SetEmbedderData(v8::Isolate* isolate, void* data) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
*reinterpret_cast<void**>(addr) = data;
}
- V8_INLINE(static void* GetEmbedderData(v8::Isolate* isolate)) {
+ static inline void* GetEmbedderData(v8::Isolate* isolate) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
kIsolateEmbedderDataOffset;
return *reinterpret_cast<void**>(addr);
}
- V8_INLINE(static internal::Object** GetRoot(v8::Isolate* isolate,
- int index)) {
+ static inline internal::Object** GetRoot(v8::Isolate* isolate, int index) {
uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) + kIsolateRootsOffset;
return reinterpret_cast<internal::Object**>(addr + index * kApiPointerSize);
}
template <typename T>
- V8_INLINE(static T ReadField(Object* ptr, int offset)) {
+ static inline T ReadField(Object* ptr, int offset) {
uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
return *reinterpret_cast<T*>(addr);
}
- template <typename T>
- V8_INLINE(static T ReadEmbedderData(Context* context, int index)) {
- typedef internal::Object O;
- typedef internal::Internals I;
- O* ctx = *reinterpret_cast<O**>(context);
- int embedder_data_offset = I::kContextHeaderSize +
- (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
- O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
- int value_offset =
- I::kFixedArrayHeaderSize + (internal::kApiPointerSize * index);
- return I::ReadField<T>(embedder_data, value_offset);
- }
-
- V8_INLINE(static bool CanCastToHeapObject(void* o)) { return false; }
- V8_INLINE(static bool CanCastToHeapObject(Context* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(String* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(Object* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(Message* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(StackTrace* o)) { return true; }
- V8_INLINE(static bool CanCastToHeapObject(StackFrame* o)) { return true; }
+ static inline bool CanCastToHeapObject(void* o) { return false; }
+ static inline bool CanCastToHeapObject(Context* o) { return true; }
+ static inline bool CanCastToHeapObject(String* o) { return true; }
+ static inline bool CanCastToHeapObject(Object* o) { return true; }
+ static inline bool CanCastToHeapObject(Message* o) { return true; }
+ static inline bool CanCastToHeapObject(StackTrace* o) { return true; }
+ static inline bool CanCastToHeapObject(StackFrame* o) { return true; }
};
} // namespace internal
@@ -4297,16 +4203,6 @@ Local<T> Local<T>::New(Handle<T> that) {
template <class T>
- Local<T> Local<T>::New(Isolate* isolate, Handle<T> that) {
- if (that.IsEmpty()) return Local<T>();
- T* that_ptr = *that;
- internal::Object** p = reinterpret_cast<internal::Object**>(that_ptr);
- return Local<T>(reinterpret_cast<T*>(HandleScope::CreateHandle(
- reinterpret_cast<internal::Isolate*>(isolate), *p)));
-}
-
-
-template <class T>
Persistent<T> Persistent<T>::New(Handle<T> that) {
if (that.IsEmpty()) return Persistent<T>();
internal::Object** p = reinterpret_cast<internal::Object**>(*that);
@@ -4322,14 +4218,6 @@ bool Persistent<T>::IsIndependent() const {
template <class T>
-bool Persistent<T>::IsIndependent(Isolate* isolate) const {
- if (this->IsEmpty()) return false;
- return V8::IsGlobalIndependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
bool Persistent<T>::IsNearDeath() const {
if (this->IsEmpty()) return false;
return V8::IsGlobalNearDeath(reinterpret_cast<internal::Object**>(**this));
@@ -4344,14 +4232,6 @@ bool Persistent<T>::IsWeak() const {
template <class T>
-bool Persistent<T>::IsWeak(Isolate* isolate) const {
- if (this->IsEmpty()) return false;
- return V8::IsGlobalWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
void Persistent<T>::Dispose() {
if (this->IsEmpty()) return;
V8::DisposeGlobal(reinterpret_cast<internal::Object**>(**this));
@@ -4359,14 +4239,6 @@ void Persistent<T>::Dispose() {
template <class T>
-void Persistent<T>::Dispose(Isolate* isolate) {
- if (this->IsEmpty()) return;
- V8::DisposeGlobal(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
-}
-
-
-template <class T>
Persistent<T>::Persistent() : Handle<T>() { }
template <class T>
@@ -4377,15 +4249,6 @@ void Persistent<T>::MakeWeak(void* parameters, WeakReferenceCallback callback) {
}
template <class T>
-void Persistent<T>::MakeWeak(Isolate* isolate, void* parameters,
- WeakReferenceCallback callback) {
- V8::MakeWeak(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this),
- parameters,
- callback);
-}
-
-template <class T>
void Persistent<T>::ClearWeak() {
V8::ClearWeak(reinterpret_cast<internal::Object**>(**this));
}
@@ -4396,23 +4259,6 @@ void Persistent<T>::MarkIndependent() {
}
template <class T>
-void Persistent<T>::MarkIndependent(Isolate* isolate) {
- V8::MarkIndependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
-}
-
-template <class T>
-void Persistent<T>::MarkPartiallyDependent() {
- V8::MarkPartiallyDependent(reinterpret_cast<internal::Object**>(**this));
-}
-
-template <class T>
-void Persistent<T>::MarkPartiallyDependent(Isolate* isolate) {
- V8::MarkPartiallyDependent(reinterpret_cast<internal::Isolate*>(isolate),
- reinterpret_cast<internal::Object**>(**this));
-}
-
-template <class T>
void Persistent<T>::SetWrapperClassId(uint16_t class_id) {
V8::SetWrapperClassId(reinterpret_cast<internal::Object**>(**this), class_id);
}
@@ -4508,40 +4354,63 @@ void Template::Set(const char* name, v8::Handle<Data> value) {
Local<Value> Object::GetInternalField(int index) {
#ifndef V8_ENABLE_CHECKS
+ Local<Value> quick_result = UncheckedGetInternalField(index);
+ if (!quick_result.IsEmpty()) return quick_result;
+#endif
+ return CheckedGetInternalField(index);
+}
+
+
+Local<Value> Object::UncheckedGetInternalField(int index) {
typedef internal::Object O;
typedef internal::Internals I;
O* obj = *reinterpret_cast<O**>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
if (I::GetInstanceType(obj) == I::kJSObjectType) {
+ // If the object is a plain JSObject, which is the common case,
+ // we know where to find the internal fields and can return the
+ // value directly.
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
O* value = I::ReadField<O*>(obj, offset);
O** result = HandleScope::CreateHandle(value);
return Local<Value>(reinterpret_cast<Value*>(result));
+ } else {
+ return Local<Value>();
}
+}
+
+
+void* External::Unwrap(Handle<v8::Value> obj) {
+#ifdef V8_ENABLE_CHECKS
+ return FullUnwrap(obj);
+#else
+ return QuickUnwrap(obj);
#endif
- return SlowGetInternalField(index);
}
-void Object::SetPointerInInternalField(int index, void* value) {
- SetInternalField(index, External::New(value));
+void* External::QuickUnwrap(Handle<v8::Value> wrapper) {
+ typedef internal::Object O;
+ O* obj = *reinterpret_cast<O**>(const_cast<v8::Value*>(*wrapper));
+ return internal::Internals::GetExternalPointer(obj);
}
-void* Object::GetAlignedPointerFromInternalField(int index) {
-#ifndef V8_ENABLE_CHECKS
+void* Object::GetPointerFromInternalField(int index) {
typedef internal::Object O;
typedef internal::Internals I;
+
O* obj = *reinterpret_cast<O**>(this);
- // Fast path: If the object is a plain JSObject, which is the common case, we
- // know where to find the internal fields and can return the value directly.
+
if (I::GetInstanceType(obj) == I::kJSObjectType) {
+ // If the object is a plain JSObject, which is the common case,
+ // we know where to find the internal fields and can return the
+ // value directly.
int offset = I::kJSObjectHeaderSize + (internal::kApiPointerSize * index);
- return I::ReadField<void*>(obj, offset);
+ O* value = I::ReadField<O*>(obj, offset);
+ return I::GetExternalPointer(value);
}
-#endif
- return SlowGetAlignedPointerFromInternalField(index);
+
+ return SlowGetPointerFromInternalField(index);
}
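
GetPointerFromInternalField now dispatches on the stored representation: a smi-encoded aligned pointer on the fast path, a Foreign (via GetExternalPointer) otherwise. Embedder usage, assuming a template with one internal field (MyNative is a hypothetical type):

    v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New();
    tmpl->SetInternalFieldCount(1);
    v8::Local<v8::Object> obj = tmpl->NewInstance();

    MyNative* native = new MyNative();
    obj->SetPointerInInternalField(0, native);  // smi-encodes if aligned
    MyNative* back =
        static_cast<MyNative*>(obj->GetPointerFromInternalField(0));
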
@@ -4733,16 +4602,6 @@ Function* Function::Cast(v8::Value* value) {
}
-Local<Value> External::Wrap(void* value) {
- return External::New(value);
-}
-
-
-void* External::Unwrap(Handle<v8::Value> obj) {
- return External::Cast(*obj)->Value();
-}
-
-
External* External::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
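
External::Wrap/Unwrap (definitions restored in api.cc below) provide the same round trip without an internal field; Unwrap picks FullUnwrap or QuickUnwrap depending on V8_ENABLE_CHECKS, as shown above. A sketch, with `data` standing in for any embedder pointer:

    void* data = NULL;  // any embedder pointer works; aligned ones smi-encode
    v8::Local<v8::Value> wrapped = v8::External::Wrap(data);
    void* unwrapped = v8::External::Unwrap(wrapped);  // == data
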
@@ -4819,37 +4678,6 @@ void* Isolate::GetData() {
}
-Local<Value> Context::GetData() {
- return GetEmbedderData(0);
-}
-
-void Context::SetData(Handle<Value> data) {
- SetEmbedderData(0, data);
-}
-
-
-Local<Value> Context::GetEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- typedef internal::Object O;
- typedef internal::Internals I;
- O** result = HandleScope::CreateHandle(I::ReadEmbedderData<O*>(this, index));
- return Local<Value>(reinterpret_cast<Value*>(result));
-#else
- return SlowGetEmbedderData(index);
-#endif
-}
-
-
-void* Context::GetAlignedPointerFromEmbedderData(int index) {
-#ifndef V8_ENABLE_CHECKS
- typedef internal::Internals I;
- return I::ReadEmbedderData<void*>(this, index);
-#else
- return SlowGetAlignedPointerFromEmbedderData(index);
-#endif
-}
-
-
/**
* \example shell.cc
* A simple shell that takes a list of expressions on the
diff --git a/deps/v8/samples/shell.cc b/deps/v8/samples/shell.cc
index 62f404554..821ef75a7 100644
--- a/deps/v8/samples/shell.cc
+++ b/deps/v8/samples/shell.cc
@@ -72,7 +72,7 @@ int main(int argc, char* argv[]) {
v8::HandleScope handle_scope;
v8::Persistent<v8::Context> context = CreateShellContext();
if (context.IsEmpty()) {
- fprintf(stderr, "Error creating context\n");
+ printf("Error creating context\n");
return 1;
}
context->Enter();
@@ -226,8 +226,7 @@ int RunMain(int argc, char* argv[]) {
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
- fprintf(stderr,
- "Warning: unknown flag %s.\nTry --help for options\n", str);
+ printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly.
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
@@ -238,7 +237,7 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::String> file_name = v8::String::New(str);
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
- fprintf(stderr, "Error reading '%s'\n", str);
+ printf("Error reading '%s'\n", str);
continue;
}
if (!ExecuteString(source, file_name, false, true)) return 1;
@@ -250,20 +249,20 @@ int RunMain(int argc, char* argv[]) {
// The read-eval-execute loop of the shell.
void RunShell(v8::Handle<v8::Context> context) {
- fprintf(stderr, "V8 version %s [sample shell]\n", v8::V8::GetVersion());
+ printf("V8 version %s [sample shell]\n", v8::V8::GetVersion());
static const int kBufferSize = 256;
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
v8::Local<v8::String> name(v8::String::New("(shell)"));
while (true) {
char buffer[kBufferSize];
- fprintf(stderr, "> ");
+ printf("> ");
char* str = fgets(buffer, kBufferSize, stdin);
if (str == NULL) break;
v8::HandleScope handle_scope;
ExecuteString(v8::String::New(str), name, true, true);
}
- fprintf(stderr, "\n");
+ printf("\n");
}
@@ -311,31 +310,31 @@ void ReportException(v8::TryCatch* try_catch) {
if (message.IsEmpty()) {
// V8 didn't provide any extra information about this error; just
// print the exception.
- fprintf(stderr, "%s\n", exception_string);
+ printf("%s\n", exception_string);
} else {
// Print (filename):(line number): (message).
v8::String::Utf8Value filename(message->GetScriptResourceName());
const char* filename_string = ToCString(filename);
int linenum = message->GetLineNumber();
- fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
+ printf("%s:%i: %s\n", filename_string, linenum, exception_string);
// Print line of source code.
v8::String::Utf8Value sourceline(message->GetSourceLine());
const char* sourceline_string = ToCString(sourceline);
- fprintf(stderr, "%s\n", sourceline_string);
+ printf("%s\n", sourceline_string);
// Print wavy underline (GetUnderline is deprecated).
int start = message->GetStartColumn();
for (int i = 0; i < start; i++) {
- fprintf(stderr, " ");
+ printf(" ");
}
int end = message->GetEndColumn();
for (int i = start; i < end; i++) {
- fprintf(stderr, "^");
+ printf("^");
}
- fprintf(stderr, "\n");
+ printf("\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
- fprintf(stderr, "%s\n", stack_trace_string);
+ printf("%s\n", stack_trace_string);
}
}
}
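
The shell hunks revert every fprintf(stderr, ...) to printf, so diagnostics share stdout with program output instead of surviving redirection. The difference in one pair of calls (illustrative):

    printf("Error reading '%s'\n", str);           // 3.14 shell: stdout
    fprintf(stderr, "Error reading '%s'\n", str);  // 3.15 shell: stderr, so
                                                   // `shell script.js > out` still
                                                   // shows the error on the console
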
diff --git a/deps/v8/src/accessors.cc b/deps/v8/src/accessors.cc
index efcaf8f29..1bc9221a2 100644
--- a/deps/v8/src/accessors.cc
+++ b/deps/v8/src/accessors.cc
@@ -112,7 +112,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
HandleScope scope(isolate);
// Protect raw pointers.
- Handle<JSArray> array_handle(JSArray::cast(object), isolate);
+ Handle<JSObject> object_handle(object, isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
@@ -122,7 +122,7 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
if (has_exception) return Failure::Exception();
if (uint32_v->Number() == number_v->Number()) {
- return array_handle->SetElementsLength(*uint32_v);
+ return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
}
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
@@ -465,46 +465,24 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
- Object* value_raw,
+ Object* value,
void*) {
- Isolate* isolate = object->GetIsolate();
- Heap* heap = isolate->heap();
- JSFunction* function_raw = FindInstanceOf<JSFunction>(object);
- if (function_raw == NULL) return heap->undefined_value();
- if (!function_raw->should_have_prototype()) {
+ Heap* heap = object->GetHeap();
+ JSFunction* function = FindInstanceOf<JSFunction>(object);
+ if (function == NULL) return heap->undefined_value();
+ if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
- value_raw,
+ value,
NONE);
}
- HandleScope scope(isolate);
- Handle<JSFunction> function(function_raw, isolate);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value;
- bool is_observed =
- FLAG_harmony_observation &&
- *function == object &&
- function->map()->is_observed();
- if (is_observed) {
- if (function->has_prototype())
- old_value = handle(function->prototype(), isolate);
- else
- old_value = isolate->factory()->NewFunctionPrototype(function);
- }
-
- Handle<Object> result;
- MaybeObject* maybe_result = function->SetPrototype(*value);
- if (!maybe_result->ToHandle(&result, isolate)) return maybe_result;
- ASSERT(function->prototype() == *value);
-
- if (is_observed && !old_value->SameValue(*value)) {
- JSObject::EnqueueChangeRecord(
- function, "updated", isolate->factory()->prototype_symbol(), old_value);
+ Object* prototype;
+ { MaybeObject* maybe_prototype = function->SetPrototype(value);
+ if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
-
- return *function;
+ ASSERT(function->prototype() == value);
+ return function;
}
@@ -671,6 +649,19 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
+static MaybeObject* CheckNonStrictCallerOrThrow(
+ Isolate* isolate,
+ JSFunction* caller) {
+ DisableAssertNoAllocation enable_allocation;
+ if (!caller->shared()->is_classic_mode()) {
+ return isolate->Throw(
+ *isolate->factory()->NewTypeError("strict_caller",
+ HandleVector<Object>(NULL, 0)));
+ }
+ return caller;
+}
+
+
class FrameFunctionIterator {
public:
FrameFunctionIterator(Isolate* isolate, const AssertNoAllocation& promise)
@@ -757,14 +748,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
if (caller->shared()->bound()) {
return isolate->heap()->null_value();
}
- // Censor if the caller is not a classic mode function.
- // Change from ES5, which used to throw, see:
- // https://bugs.ecmascript.org/show_bug.cgi?id=310
- if (!caller->shared()->is_classic_mode()) {
- return isolate->heap()->null_value();
- }
-
- return caller;
+ return CheckNonStrictCallerOrThrow(isolate, caller);
}
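
With CheckNonStrictCallerOrThrow restored, reading fn.caller while the calling function is strict mode throws a 'strict_caller' TypeError again, where 3.15 censored it to null. Observable from the embedder, assuming an entered context and the usual compile/run boilerplate:

    v8::TryCatch try_catch;
    v8::Handle<v8::String> src = v8::String::New(
        "function strict() { 'use strict'; return leak(); }\n"
        "function leak() { return leak.caller; }\n"
        "strict();");
    v8::Script::Compile(src)->Run();
    // try_catch.HasCaught() is now true: accessing leak.caller from a
    // strict-mode caller throws under the reverted semantics.
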
@@ -780,7 +764,7 @@ const AccessorDescriptor Accessors::FunctionCaller = {
//
-static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
+MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
Object* current = receiver->GetPrototype();
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
@@ -790,36 +774,12 @@ static inline Object* GetPrototypeSkipHiddenPrototypes(Object* receiver) {
}
-MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
- return GetPrototypeSkipHiddenPrototypes(receiver);
-}
-
-
-MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
- Object* value_raw,
+MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver,
+ Object* value,
void*) {
- const bool kSkipHiddenPrototypes = true;
+ const bool skip_hidden_prototypes = true;
// To be consistent with other Set functions, return the value.
- if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
- return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
-
- Isolate* isolate = receiver_raw->GetIsolate();
- HandleScope scope(isolate);
- Handle<JSObject> receiver(receiver_raw);
- Handle<Object> value(value_raw);
- Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(*receiver));
-
- MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(*receiver));
- if (!new_value->SameValue(*old_value)) {
- JSObject::EnqueueChangeRecord(receiver, "prototype",
- isolate->factory()->Proto_symbol(),
- old_value);
- }
- return *hresult;
+ return receiver->SetPrototype(value, skip_hidden_prototypes);
}
diff --git a/deps/v8/src/api.cc b/deps/v8/src/api.cc
index 95e5340a5..e0ad29b83 100644
--- a/deps/v8/src/api.cc
+++ b/deps/v8/src/api.cc
@@ -630,16 +630,7 @@ void V8::MakeWeak(i::Object** object, void* parameters,
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "MakeWeak");
isolate->global_handles()->MakeWeak(object, parameters,
- callback);
-}
-
-
-void V8::MakeWeak(i::Isolate* isolate, i::Object** object,
- void* parameters, WeakReferenceCallback callback) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MakeWeak");
- isolate->global_handles()->MakeWeak(object, parameters,
- callback);
+ callback);
}
@@ -652,32 +643,11 @@ void V8::ClearWeak(i::Object** obj) {
void V8::MarkIndependent(i::Object** object) {
i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MarkIndependent");
- isolate->global_handles()->MarkIndependent(object);
-}
-
-
-void V8::MarkIndependent(i::Isolate* isolate, i::Object** object) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MarkIndependent");
+ LOG_API(isolate, "MakeIndependent");
isolate->global_handles()->MarkIndependent(object);
}
-void V8::MarkPartiallyDependent(i::Object** object) {
- i::Isolate* isolate = i::Isolate::Current();
- LOG_API(isolate, "MarkPartiallyDependent");
- isolate->global_handles()->MarkPartiallyDependent(object);
-}
-
-
-void V8::MarkPartiallyDependent(i::Isolate* isolate, i::Object** object) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "MarkPartiallyDependent");
- isolate->global_handles()->MarkPartiallyDependent(object);
-}
-
-
bool V8::IsGlobalIndependent(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalIndependent");
@@ -686,14 +656,6 @@ bool V8::IsGlobalIndependent(i::Object** obj) {
}
-bool V8::IsGlobalIndependent(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "IsGlobalIndependent");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsIndependent(obj);
-}
-
-
bool V8::IsGlobalNearDeath(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "IsGlobalNearDeath");
@@ -710,14 +672,6 @@ bool V8::IsGlobalWeak(i::Object** obj) {
}
-bool V8::IsGlobalWeak(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "IsGlobalWeak");
- if (!isolate->IsInitialized()) return false;
- return i::GlobalHandles::IsWeak(obj);
-}
-
-
void V8::DisposeGlobal(i::Object** obj) {
i::Isolate* isolate = i::Isolate::Current();
LOG_API(isolate, "DisposeGlobal");
@@ -725,14 +679,6 @@ void V8::DisposeGlobal(i::Object** obj) {
isolate->global_handles()->Destroy(obj);
}
-
-void V8::DisposeGlobal(i::Isolate* isolate, i::Object** obj) {
- ASSERT(isolate == i::Isolate::Current());
- LOG_API(isolate, "DisposeGlobal");
- if (!isolate->IsInitialized()) return;
- isolate->global_handles()->Destroy(obj);
-}
-
// --- H a n d l e s ---
@@ -786,12 +732,6 @@ i::Object** HandleScope::CreateHandle(i::Object* value) {
}
-i::Object** HandleScope::CreateHandle(i::Isolate* isolate, i::Object* value) {
- ASSERT(isolate == i::Isolate::Current());
- return i::HandleScope::CreateHandle(value, isolate);
-}
-
-
i::Object** HandleScope::CreateHandle(i::HeapObject* value) {
ASSERT(value->IsHeapObject());
return reinterpret_cast<i::Object**>(
@@ -833,77 +773,33 @@ void Context::Exit() {
}
-static void* DecodeSmiToAligned(i::Object* value, const char* location) {
- ApiCheck(value->IsSmi(), location, "Not a Smi");
- return reinterpret_cast<void*>(value);
-}
-
-
-static i::Smi* EncodeAlignedAsSmi(void* value, const char* location) {
- i::Smi* smi = reinterpret_cast<i::Smi*>(value);
- ApiCheck(smi->IsSmi(), location, "Pointer is not aligned");
- return smi;
-}
-
-
-static i::Handle<i::FixedArray> EmbedderDataFor(Context* context,
- int index,
- bool can_grow,
- const char* location) {
- i::Handle<i::Context> env = Utils::OpenHandle(context);
- bool ok = !IsDeadCheck(env->GetIsolate(), location) &&
- ApiCheck(env->IsNativeContext(), location, "Not a native context") &&
- ApiCheck(index >= 0, location, "Negative index");
- if (!ok) return i::Handle<i::FixedArray>();
- i::Handle<i::FixedArray> data(env->embedder_data());
- if (index < data->length()) return data;
- if (!can_grow) {
- Utils::ReportApiFailure(location, "Index too large");
- return i::Handle<i::FixedArray>();
+void Context::SetData(v8::Handle<Value> data) {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Context::SetData()")) return;
+ i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
+ ASSERT(env->IsNativeContext());
+ if (env->IsNativeContext()) {
+ env->set_data(*raw_data);
}
- int new_size = i::Max(index, data->length() << 1) + 1;
- data = env->GetIsolate()->factory()->CopySizeFixedArray(data, new_size);
- env->set_embedder_data(*data);
- return data;
}
-v8::Local<v8::Value> Context::SlowGetEmbedderData(int index) {
- const char* location = "v8::Context::GetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
- if (data.is_null()) return Local<Value>();
- i::Handle<i::Object> result(data->get(index), data->GetIsolate());
+v8::Local<v8::Value> Context::GetData() {
+ i::Handle<i::Context> env = Utils::OpenHandle(this);
+ i::Isolate* isolate = env->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Context::GetData()")) {
+ return Local<Value>();
+ }
+ ASSERT(env->IsNativeContext());
+ if (!env->IsNativeContext()) {
+ return Local<Value>();
+ }
+ i::Handle<i::Object> result(env->data(), isolate);
return Utils::ToLocal(result);
}
-void Context::SetEmbedderData(int index, v8::Handle<Value> value) {
- const char* location = "v8::Context::SetEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
- if (data.is_null()) return;
- i::Handle<i::Object> val = Utils::OpenHandle(*value);
- data->set(index, *val);
- ASSERT_EQ(*Utils::OpenHandle(*value),
- *Utils::OpenHandle(*GetEmbedderData(index)));
-}
-
-
-void* Context::SlowGetAlignedPointerFromEmbedderData(int index) {
- const char* location = "v8::Context::GetAlignedPointerFromEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, false, location);
- if (data.is_null()) return NULL;
- return DecodeSmiToAligned(data->get(index), location);
-}
-
-
-void Context::SetAlignedPointerInEmbedderData(int index, void* value) {
- const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
- i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
- data->set(index, EncodeAlignedAsSmi(value, location));
- ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
-}
-
-
i::Object** v8::HandleScope::RawClose(i::Object** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
@@ -925,7 +821,7 @@ i::Object** v8::HandleScope::RawClose(i::Object** value) {
}
// Allocate a new handle on the previous handle block.
- i::Handle<i::Object> handle(result, isolate_);
+ i::Handle<i::Object> handle(result);
return handle.location();
}
@@ -1260,7 +1156,7 @@ void FunctionTemplate::SetHiddenPrototype(bool value) {
void FunctionTemplate::ReadOnlyPrototype() {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
- if (IsDeadCheck(isolate, "v8::FunctionTemplate::ReadOnlyPrototype()")) {
+ if (IsDeadCheck(isolate, "v8::FunctionTemplate::SetPrototypeAttributes()")) {
return;
}
ENTER_V8(isolate);
@@ -1704,8 +1600,6 @@ Local<Value> Script::Run() {
ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
LOG_API(isolate, "Script::Run");
ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@@ -2304,7 +2198,7 @@ bool Value::IsExternal() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsExternal()")) {
return false;
}
- return Utils::OpenHandle(this)->IsExternal();
+ return Utils::OpenHandle(this)->IsForeign();
}
@@ -2378,11 +2272,7 @@ static i::Object* LookupBuiltin(i::Isolate* isolate,
static bool CheckConstructor(i::Isolate* isolate,
i::Handle<i::JSObject> obj,
const char* class_name) {
- i::Object* constr = obj->map()->constructor();
- if (!constr->IsJSFunction()) return false;
- i::JSFunction* func = i::JSFunction::cast(constr);
- return func->shared()->native() &&
- constr == LookupBuiltin(isolate, class_name);
+ return obj->map()->constructor() == LookupBuiltin(isolate, class_name);
}
@@ -2537,7 +2427,8 @@ Local<Integer> Value::ToInteger() const {
void External::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::External::Cast()")) return;
- ApiCheck(Utils::OpenHandle(that)->IsExternal(),
+ i::Handle<i::Object> obj = Utils::OpenHandle(that);
+ ApiCheck(obj->IsForeign(),
"v8::External::Cast()",
"Could not convert to external");
}
@@ -2882,7 +2773,6 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::SetProperty(
- isolate,
self,
key_obj,
value_obj,
@@ -3437,7 +3327,7 @@ v8::Local<v8::Value> v8::Object::GetHiddenValue(v8::Handle<v8::String> key) {
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
i::Handle<i::String> key_symbol = FACTORY->LookupSymbol(key_obj);
- i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol), isolate);
+ i::Handle<i::Object> result(self->GetHiddenProperty(*key_symbol));
if (result->IsUndefined()) return v8::Local<v8::Value>();
return Utils::ToLocal(result);
}
@@ -3674,8 +3564,6 @@ Local<v8::Value> Object::CallAsFunction(v8::Handle<v8::Object> recv,
return Local<v8::Value>());
LOG_API(isolate, "Object::CallAsFunction");
ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -3707,8 +3595,6 @@ Local<v8::Value> Object::CallAsConstructor(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Object::CallAsConstructor");
ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3751,8 +3637,6 @@ Local<v8::Object> Function::NewInstance(int argc,
return Local<v8::Object>());
LOG_API(isolate, "Function::NewInstance");
ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
HandleScope scope;
i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3771,8 +3655,6 @@ Local<v8::Value> Function::Call(v8::Handle<v8::Object> recv, int argc,
ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
LOG_API(isolate, "Function::Call");
ENTER_V8(isolate);
- i::Logger::TimerEventScope timer_scope(
- isolate, i::Logger::TimerEventScope::v8_execute);
i::Object* raw_result = NULL;
{
i::HandleScope scope(isolate);
@@ -3816,9 +3698,8 @@ ScriptOrigin Function::GetScriptOrigin() const {
i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
if (func->shared()->script()->IsScript()) {
i::Handle<i::Script> script(i::Script::cast(func->shared()->script()));
- i::Handle<i::Object> scriptName = GetScriptNameOrSourceURL(script);
v8::ScriptOrigin origin(
- Utils::ToLocal(scriptName),
+ Utils::ToLocal(i::Handle<i::Object>(script->name())),
v8::Integer::New(script->line_offset()->value()),
v8::Integer::New(script->column_offset()->value()));
return origin;
@@ -3881,7 +3762,7 @@ static int RecursivelySerializeToUtf8(i::String* string,
int32_t* last_character) {
int utf8_bytes = 0;
while (true) {
- if (string->IsOneByteRepresentation()) {
+ if (string->IsAsciiRepresentation()) {
i::String::WriteToFlat(string, buffer, start, end);
*last_character = unibrow::Utf16::kNoPreviousCharacter;
return utf8_bytes + end - start;
@@ -3981,7 +3862,7 @@ int String::WriteUtf8(char* buffer,
FlattenString(str); // Flatten the string for efficiency.
}
int string_length = str->length();
- if (str->IsOneByteRepresentation()) {
+ if (str->IsAsciiRepresentation()) {
int len;
if (capacity == -1) {
capacity = str->length() + 1;
@@ -4115,7 +3996,7 @@ int String::WriteAscii(char* buffer,
FlattenString(str); // Flatten the string for efficiency.
}
- if (str->IsOneByteRepresentation()) {
+ if (str->IsAsciiRepresentation()) {
// WriteToFlat is faster than using the StringInputBuffer.
if (length == -1) length = str->length() + 1;
int len = i::Min(length, str->length() - start);
@@ -4230,7 +4111,7 @@ void v8::String::VerifyExternalStringResourceBase(
expectedEncoding = TWO_BYTE_ENCODING;
} else {
expected = NULL;
- expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
+ expectedEncoding = str->IsAsciiRepresentation() ? ASCII_ENCODING
: TWO_BYTE_ENCODING;
}
CHECK_EQ(expected, value);
@@ -4310,65 +4191,75 @@ int v8::Object::InternalFieldCount() {
}
-static bool InternalFieldOK(i::Handle<i::JSObject> obj,
- int index,
- const char* location) {
- return !IsDeadCheck(obj->GetIsolate(), location) &&
- ApiCheck(index < obj->GetInternalFieldCount(),
- location,
- "Internal field out of bounds");
-}
-
-
-Local<Value> v8::Object::SlowGetInternalField(int index) {
+Local<Value> v8::Object::CheckedGetInternalField(int index) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::GetInternalField()";
- if (!InternalFieldOK(obj, index, location)) return Local<Value>();
- i::Handle<i::Object> value(obj->GetInternalField(index), obj->GetIsolate());
- return Utils::ToLocal(value);
+ if (IsDeadCheck(obj->GetIsolate(), "v8::Object::GetInternalField()")) {
+ return Local<Value>();
+ }
+ if (!ApiCheck(index < obj->GetInternalFieldCount(),
+ "v8::Object::GetInternalField()",
+ "Reading internal field out of bounds")) {
+ return Local<Value>();
+ }
+ i::Handle<i::Object> value(obj->GetInternalField(index));
+ Local<Value> result = Utils::ToLocal(value);
+#ifdef DEBUG
+ Local<Value> unchecked = UncheckedGetInternalField(index);
+ ASSERT(unchecked.IsEmpty() || (unchecked == result));
+#endif
+ return result;
}
void v8::Object::SetInternalField(int index, v8::Handle<Value> value) {
i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::SetInternalField()";
- if (!InternalFieldOK(obj, index, location)) return;
+ i::Isolate* isolate = obj->GetIsolate();
+ if (IsDeadCheck(isolate, "v8::Object::SetInternalField()")) {
+ return;
+ }
+ if (!ApiCheck(index < obj->GetInternalFieldCount(),
+ "v8::Object::SetInternalField()",
+ "Writing internal field out of bounds")) {
+ return;
+ }
+ ENTER_V8(isolate);
i::Handle<i::Object> val = Utils::OpenHandle(*value);
obj->SetInternalField(index, *val);
- ASSERT_EQ(value, GetInternalField(index));
}
-void* v8::Object::SlowGetAlignedPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::GetAlignedPointerFromInternalField()";
- if (!InternalFieldOK(obj, index, location)) return NULL;
- return DecodeSmiToAligned(obj->GetInternalField(index), location);
+static bool CanBeEncodedAsSmi(void* ptr) {
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+ return ((address & i::kEncodablePointerMask) == 0);
}
-void v8::Object::SetAlignedPointerInInternalField(int index, void* value) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::SetAlignedPointerInInternalField()";
- if (!InternalFieldOK(obj, index, location)) return;
- obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
- ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
-}
-
-
-static void* ExternalValue(i::Object* obj) {
- // Obscure semantics for undefined, but somehow checked in our unit tests...
- if (obj->IsUndefined()) return NULL;
- i::Object* foreign = i::JSObject::cast(obj)->GetInternalField(0);
- return i::Foreign::cast(foreign)->foreign_address();
+static i::Smi* EncodeAsSmi(void* ptr) {
+ ASSERT(CanBeEncodedAsSmi(ptr));
+ const uintptr_t address = reinterpret_cast<uintptr_t>(ptr);
+ i::Smi* result = reinterpret_cast<i::Smi*>(address << i::kPointerToSmiShift);
+ ASSERT(i::Internals::HasSmiTag(result));
+ ASSERT_EQ(result, i::Smi::FromInt(result->value()));
+ ASSERT_EQ(ptr, i::Internals::GetExternalPointerFromSmi(result));
+ return result;
}
-void* Object::GetPointerFromInternalField(int index) {
- i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
- const char* location = "v8::Object::GetPointerFromInternalField()";
- if (!InternalFieldOK(obj, index, location)) return NULL;
- return ExternalValue(obj->GetInternalField(index));
+void v8::Object::SetPointerInInternalField(int index, void* value) {
+ i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+ ENTER_V8(isolate);
+ if (CanBeEncodedAsSmi(value)) {
+ Utils::OpenHandle(this)->SetInternalField(index, EncodeAsSmi(value));
+ } else {
+ HandleScope scope;
+ i::Handle<i::Foreign> foreign =
+ isolate->factory()->NewForeign(
+ reinterpret_cast<i::Address>(value), i::TENURED);
+ if (!foreign.is_null()) {
+ Utils::OpenHandle(this)->SetInternalField(index, *foreign);
+ }
+ }
+ ASSERT_EQ(value, GetPointerFromInternalField(index));
}
@@ -4423,7 +4314,6 @@ bool v8::V8::Dispose() {
HeapStatistics::HeapStatistics(): total_heap_size_(0),
total_heap_size_executable_(0),
- total_physical_size_(0),
used_heap_size_(0),
heap_size_limit_(0) { }
@@ -4433,7 +4323,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
// Isolate is uninitialized, thus the heap is not configured yet.
heap_statistics->set_total_heap_size(0);
heap_statistics->set_total_heap_size_executable(0);
- heap_statistics->set_total_physical_size(0);
heap_statistics->set_used_heap_size(0);
heap_statistics->set_heap_size_limit(0);
return;
@@ -4443,7 +4332,6 @@ void v8::V8::GetHeapStatistics(HeapStatistics* heap_statistics) {
heap_statistics->set_total_heap_size(heap->CommittedMemory());
heap_statistics->set_total_heap_size_executable(
heap->CommittedMemoryExecutable());
- heap_statistics->set_total_physical_size(heap->CommittedPhysicalMemory());
heap_statistics->set_used_heap_size(heap->SizeOfObjects());
heap_statistics->set_heap_size_limit(heap->MaxReserved());
}
@@ -4680,14 +4568,13 @@ v8::Local<v8::Context> Context::GetCalling() {
v8::Local<v8::Object> Context::Global() {
- i::Isolate* isolate = i::Isolate::Current();
- if (IsDeadCheck(isolate, "v8::Context::Global()")) {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::Context::Global()")) {
return Local<v8::Object>();
}
i::Object** ctx = reinterpret_cast<i::Object**>(this);
i::Handle<i::Context> context =
i::Handle<i::Context>::cast(i::Handle<i::Object>(ctx));
- i::Handle<i::Object> global(context->global_proxy(), isolate);
+ i::Handle<i::Object> global(context->global_proxy());
return Utils::ToLocal(i::Handle<i::JSObject>::cast(global));
}
@@ -4808,20 +4695,74 @@ bool FunctionTemplate::HasInstance(v8::Handle<v8::Value> value) {
}
-Local<External> v8::External::New(void* value) {
- STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
+static Local<External> ExternalNewImpl(void* data) {
+ return Utils::ToLocal(FACTORY->NewForeign(static_cast<i::Address>(data)));
+}
+
+static void* ExternalValueImpl(i::Handle<i::Object> obj) {
+ return reinterpret_cast<void*>(i::Foreign::cast(*obj)->foreign_address());
+}
+
+
+Local<Value> v8::External::Wrap(void* data) {
+ i::Isolate* isolate = i::Isolate::Current();
+ STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
+ EnsureInitializedForIsolate(isolate, "v8::External::Wrap()");
+ LOG_API(isolate, "External::Wrap");
+ ENTER_V8(isolate);
+
+ v8::Local<v8::Value> result = CanBeEncodedAsSmi(data)
+ ? Utils::ToLocal(i::Handle<i::Object>(EncodeAsSmi(data)))
+ : v8::Local<v8::Value>(ExternalNewImpl(data));
+
+ ASSERT_EQ(data, Unwrap(result));
+ return result;
+}
+
+
+void* v8::Object::SlowGetPointerFromInternalField(int index) {
+ i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+ i::Object* value = obj->GetInternalField(index);
+ if (value->IsSmi()) {
+ return i::Internals::GetExternalPointerFromSmi(value);
+ } else if (value->IsForeign()) {
+ return reinterpret_cast<void*>(i::Foreign::cast(value)->foreign_address());
+ } else {
+ return NULL;
+ }
+}
+
+
+void* v8::External::FullUnwrap(v8::Handle<v8::Value> wrapper) {
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Unwrap()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(*wrapper);
+ void* result;
+ if (obj->IsSmi()) {
+ result = i::Internals::GetExternalPointerFromSmi(*obj);
+ } else if (obj->IsForeign()) {
+ result = ExternalValueImpl(obj);
+ } else {
+ result = NULL;
+ }
+ ASSERT_EQ(result, QuickUnwrap(wrapper));
+ return result;
+}
+
+
+Local<External> v8::External::New(void* data) {
+ STATIC_ASSERT(sizeof(data) == sizeof(i::Address));
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::External::New()");
LOG_API(isolate, "External::New");
ENTER_V8(isolate);
- i::Handle<i::JSObject> external = isolate->factory()->NewExternal(value);
- return Utils::ExternalToLocal(external);
+ return ExternalNewImpl(data);
}
void* External::Value() const {
- if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return NULL;
- return ExternalValue(*Utils::OpenHandle(this));
+ if (IsDeadCheck(i::Isolate::Current(), "v8::External::Value()")) return 0;
+ i::Handle<i::Object> obj = Utils::OpenHandle(this);
+ return ExternalValueImpl(obj);
}
@@ -5391,6 +5332,13 @@ void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
SetAddHistogramSampleFunction(callback);
}
+void V8::EnableSlidingStateWindow() {
+ i::Isolate* isolate = i::Isolate::Current();
+ if (IsDeadCheck(isolate, "v8::V8::EnableSlidingStateWindow()")) return;
+ isolate->logger()->EnableSlidingStateWindow();
+}
+
+
void V8::SetFailedAccessCheckCallbackFunction(
FailedAccessCheckCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
@@ -5400,7 +5348,6 @@ void V8::SetFailedAccessCheckCallbackFunction(
isolate->SetFailedAccessCheckCallback(callback);
}
-
void V8::AddObjectGroup(Persistent<Value>* objects,
size_t length,
RetainedObjectInfo* info) {
@@ -5412,19 +5359,6 @@ void V8::AddObjectGroup(Persistent<Value>* objects,
}
-void V8::AddObjectGroup(Isolate* exportedIsolate,
- Persistent<Value>* objects,
- size_t length,
- RetainedObjectInfo* info) {
- i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exportedIsolate);
- ASSERT(isolate == i::Isolate::Current());
- if (IsDeadCheck(isolate, "v8::V8::AddObjectGroup()")) return;
- STATIC_ASSERT(sizeof(Persistent<Value>) == sizeof(i::Object**));
- isolate->global_handles()->AddObjectGroup(
- reinterpret_cast<i::Object***>(objects), length, info);
-}
-
-
void V8::AddImplicitReferences(Persistent<Object> parent,
Persistent<Value>* children,
size_t length) {
@@ -6435,8 +6369,7 @@ SnapshotObjectId HeapProfiler::GetSnapshotObjectId(Handle<Value> value) {
const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
HeapSnapshot::Type type,
- ActivityControl* control,
- ObjectNameResolver* resolver) {
+ ActivityControl* control) {
i::Isolate* isolate = i::Isolate::Current();
IsDeadCheck(isolate, "v8::HeapProfiler::TakeSnapshot");
i::HeapSnapshot::Type internal_type = i::HeapSnapshot::kFull;
@@ -6449,7 +6382,7 @@ const HeapSnapshot* HeapProfiler::TakeSnapshot(Handle<String> title,
}
return reinterpret_cast<const HeapSnapshot*>(
i::HeapProfiler::TakeSnapshot(
- *Utils::OpenHandle(*title), internal_type, control, resolver));
+ *Utils::OpenHandle(*title), internal_type, control));
}
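
The profiler keeps the three-parameter signature after dropping ObjectNameResolver. A typical call (title string illustrative):

    const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
        v8::String::New("startup"),
        v8::HeapSnapshot::kFull,
        NULL);  // no ActivityControl: run synchronously to completion
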
@@ -6560,7 +6493,6 @@ void Testing::PrepareStressRun(int run) {
void Testing::DeoptimizeAll() {
- i::HandleScope scope;
internal::Deoptimizer::DeoptimizeAll();
}
diff --git a/deps/v8/src/api.h b/deps/v8/src/api.h
index ca2240b64..7197b6cb5 100644
--- a/deps/v8/src/api.h
+++ b/deps/v8/src/api.h
@@ -201,6 +201,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSObject> obj);
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
+ static inline Local<External> ToLocal(
+ v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@@ -223,8 +225,6 @@ class Utils {
v8::internal::Handle<v8::internal::FunctionTemplateInfo> obj);
static inline Local<TypeSwitch> ToLocal(
v8::internal::Handle<v8::internal::TypeSwitchInfo> obj);
- static inline Local<External> ExternalToLocal(
- v8::internal::Handle<v8::internal::JSObject> obj);
#define DECLARE_OPEN_HANDLE(From, To) \
static inline v8::internal::Handle<v8::internal::To> \
@@ -268,6 +268,7 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
+MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@@ -279,7 +280,6 @@ MAKE_TO_LOCAL(StackFrameToLocal, JSObject, StackFrame)
MAKE_TO_LOCAL(NumberToLocal, Object, Number)
MAKE_TO_LOCAL(IntegerToLocal, Object, Integer)
MAKE_TO_LOCAL(Uint32ToLocal, Object, Uint32)
-MAKE_TO_LOCAL(ExternalToLocal, JSObject, External)
#undef MAKE_TO_LOCAL
diff --git a/deps/v8/src/arm/assembler-arm-inl.h b/deps/v8/src/arm/assembler-arm-inl.h
index acd61feff..6268c332c 100644
--- a/deps/v8/src/arm/assembler-arm-inl.h
+++ b/deps/v8/src/arm/assembler-arm-inl.h
@@ -86,7 +86,8 @@ int RelocInfo::target_address_size() {
void RelocInfo::set_target_address(Address target, WriteBarrierMode mode) {
ASSERT(IsCodeTarget(rmode_) || rmode_ == RUNTIME_ENTRY);
- Assembler::set_target_address_at(pc_, target);
+ Assembler::set_target_address_at(pc_, reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target) & ~3));
if (mode == UPDATE_WRITE_BARRIER && host() != NULL && IsCodeTarget(rmode_)) {
Object* target_code = Code::GetCodeFromTargetAddress(target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -165,24 +166,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
-static const int kNoCodeAgeSequenceLength = 3;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
Address RelocInfo::call_address() {
// The 2 instructions offset assumes patched debug break slot or return
// sequence.
@@ -256,8 +239,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -284,8 +265,6 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
@@ -494,12 +473,14 @@ void Assembler::set_target_pointer_at(Address pc, Address target) {
Address Assembler::target_address_at(Address pc) {
- return target_pointer_at(pc);
+ return reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target_pointer_at(pc)) & ~3);
}
void Assembler::set_target_address_at(Address pc, Address target) {
- set_target_pointer_at(pc, target);
+ set_target_pointer_at(pc, reinterpret_cast<Address>(
+ reinterpret_cast<intptr_t>(target) & ~3));
}
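
Both accessors now mask off the low two bits of the target. The patch does not say why; a plausible reading is defensive alignment, since ARM code targets are 4-byte aligned. The arithmetic being applied:

    intptr_t addr = 0x10003;       // low bits set
    intptr_t aligned = addr & ~3;  // 0x10000: bits 0-1 cleared
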
diff --git a/deps/v8/src/arm/assembler-arm.cc b/deps/v8/src/arm/assembler-arm.cc
index 47ea0e206..9be62a404 100644
--- a/deps/v8/src/arm/assembler-arm.cc
+++ b/deps/v8/src/arm/assembler-arm.cc
@@ -318,11 +318,46 @@ const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
const Instr kLdrStrOffsetMask = 0x00000fff;
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+// Spare buffer.
+static const int kMinimalBufferSize = 4*KB;
+
+
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Set up buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
@@ -335,6 +370,14 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
+ if (own_buffer_) {
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
}
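
The constructor/destructor pair reinstates the 3.14 spare-buffer cache: one kMinimalBufferSize buffer is parked on the isolate and handed to the next assembler instead of reallocating. The same pattern in isolation (helper names are illustrative; the types and accessors come from the hunk above):

    byte* AcquireBuffer(Isolate* isolate, int size) {
      if (size <= kMinimalBufferSize &&
          isolate->assembler_spare_buffer() != NULL) {
        byte* buffer = isolate->assembler_spare_buffer();
        isolate->set_assembler_spare_buffer(NULL);  // take ownership of the slot
        return buffer;
      }
      return NewArray<byte>(size);
    }

    void ReleaseBuffer(Isolate* isolate, byte* buffer, int size) {
      if (size == kMinimalBufferSize &&
          isolate->assembler_spare_buffer() == NULL) {
        isolate->set_assembler_spare_buffer(buffer);  // park for reuse
      } else {
        DeleteArray(buffer);
      }
    }
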
@@ -2349,20 +2392,6 @@ void Assembler::vmul(const DwVfpRegister dst,
}
-void Assembler::vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond) {
- // Instruction details available in ARM DDI 0406C.b, A8-892.
- // cond(31-28) | 11100(27-23) | D=?(22) | 00(21-20) | Vn(19-16) |
- // Vd(15-12) | 101(11-9) | sz(8)=1 | N=?(7) | op(6)=0 | M=?(5) | 0(4) |
- // Vm(3-0)
- unsigned x = (cond | 0x1C*B23 | src1.code()*B16 |
- dst.code()*B12 | 0x5*B9 | B8 | src2.code());
- emit(x);
-}
-
-
void Assembler::vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -2701,9 +2730,9 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
b(&after_pool);
}
- // Put down constant pool marker "Undefined instruction".
- emit(kConstantPoolMarker |
- EncodeConstantPoolLength(num_pending_reloc_info_));
+ // Put down constant pool marker "Undefined instruction" as specified by
+ // A5.6 (ARMv7) Instruction set encoding.
+ emit(kConstantPoolMarker | num_pending_reloc_info_);
// Emit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
diff --git a/deps/v8/src/arm/assembler-arm.h b/deps/v8/src/arm/assembler-arm.h
index 3b9bb804f..dfcce6011 100644
--- a/deps/v8/src/arm/assembler-arm.h
+++ b/deps/v8/src/arm/assembler-arm.h
@@ -647,7 +647,15 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler();
+ ~Assembler();
+
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -1126,10 +1134,6 @@ class Assembler : public AssemblerBase {
const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond = al);
- void vmla(const DwVfpRegister dst,
- const DwVfpRegister src1,
- const DwVfpRegister src2,
- const Condition cond = al);
void vdiv(const DwVfpRegister dst,
const DwVfpRegister src1,
const DwVfpRegister src2,
@@ -1181,6 +1185,8 @@ class Assembler : public AssemblerBase {
// Jump unconditionally to given label.
void jmp(Label* L) { b(L, al); }
+ bool predictable_code_size() const { return predictable_code_size_; }
+
static bool use_immediate_embedded_pointer_loads(
const Assembler* assembler) {
#ifdef USE_BLX
@@ -1276,6 +1282,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ int pc_offset() const { return pc_ - buffer_; }
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Read/patch instructions
@@ -1321,8 +1329,6 @@ class Assembler : public AssemblerBase {
// and the accessed constant.
static const int kMaxDistToPool = 4*KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
- STATIC_ASSERT((kConstantPoolLengthMaxMask & kMaxNumPendingRelocInfo) ==
- kMaxNumPendingRelocInfo);
// Postpone the generation of the constant pool for the specified number of
// instructions.
@@ -1337,6 +1343,8 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
+ bool emit_debug_code() const { return emit_debug_code_; }
+
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos
@@ -1378,6 +1386,13 @@ class Assembler : public AssemblerBase {
}
private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@@ -1386,6 +1401,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
+ byte* pc_; // the program counter; moves forward
// Constant pool generation
// Pools are emitted in the instruction stream, preferably after unconditional
@@ -1479,6 +1495,10 @@ class Assembler : public AssemblerBase {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
+
+ bool emit_debug_code_;
+ bool predictable_code_size_;
+
friend class PositionsRecorder;
friend class EnsureSpace;
};
@@ -1492,6 +1512,26 @@ class EnsureSpace BASE_EMBEDDED {
};
+class PredictableCodeSizeScope {
+ public:
+ explicit PredictableCodeSizeScope(Assembler* assembler)
+ : asm_(assembler) {
+ old_value_ = assembler->predictable_code_size();
+ assembler->set_predictable_code_size(true);
+ }
+
+ ~PredictableCodeSizeScope() {
+ if (!old_value_) {
+ asm_->set_predictable_code_size(false);
+ }
+ }
+
+ private:
+ Assembler* asm_;
+ bool old_value_;
+};
+
+
} } // namespace v8::internal
#endif // V8_ARM_ASSEMBLER_ARM_H_
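
PredictableCodeSizeScope above is the standard RAII save-and-restore idiom: the constructor records the old flag and forces it on, and the destructor puts the old value back. A self-contained sketch of the same pattern with a hypothetical Flags class:

#include <cassert>

class Flags {
 public:
  Flags() : predictable_(false) {}
  bool predictable() const { return predictable_; }
  void set_predictable(bool value) { predictable_ = value; }
 private:
  bool predictable_;
};

// Forces the flag on for the lifetime of the scope, then restores it.
class PredictableScope {
 public:
  explicit PredictableScope(Flags* flags)
      : flags_(flags), old_value_(flags->predictable()) {
    flags_->set_predictable(true);
  }
  ~PredictableScope() { flags_->set_predictable(old_value_); }
 private:
  Flags* flags_;
  bool old_value_;
};

int main() {
  Flags f;
  {
    PredictableScope scope(&f);
    assert(f.predictable());  // on inside the scope
  }
  assert(!f.predictable());   // restored on exit
  return 0;
}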
diff --git a/deps/v8/src/arm/builtins-arm.cc b/deps/v8/src/arm/builtins-arm.cc
index 24d14e8c8..2d1d7b119 100644
--- a/deps/v8/src/arm/builtins-arm.cc
+++ b/deps/v8/src/arm/builtins-arm.cc
@@ -1226,39 +1226,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // r0 - contains return address (beginning of patch sequence)
- // r1 - function object
- FrameScope scope(masm, StackFrame::MANUAL);
- __ stm(db_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ PrepareCallCFunction(1, 0, r1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ ldm(ia_w, sp, r0.bit() | r1.bit() | fp.bit() | lr.bit());
- __ mov(pc, r0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
diff --git a/deps/v8/src/arm/code-stubs-arm.cc b/deps/v8/src/arm/code-stubs-arm.cc
index 9484f85f9..ceb108ffa 100644
--- a/deps/v8/src/arm/code-stubs-arm.cc
+++ b/deps/v8/src/arm/code-stubs-arm.cc
@@ -41,7 +41,8 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond);
+ Condition cond,
+ bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -626,6 +627,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+ // Load right operand (r0) to d6 or r2/r3.
+ LoadNumber(masm, destination,
+ r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
+
+ // Load left operand (r1) to d7 or r0/r1.
+ LoadNumber(masm, destination,
+ r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -729,13 +748,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register scratch2,
SwVfpRegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
Label done;
@@ -744,57 +763,56 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ vmov(single_scratch, int_scratch);
__ vcvt_f64_s32(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
+ __ vmov(dst1, dst2, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst_exponent | dst_mantissa |
+ // | dst2 | dst1 |
// | s | exp | mantissa |
// Check for zero.
__ cmp(int_scratch, Operand::Zero());
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
__ b(eq, &done);
// Preload the sign of the value.
- __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
+ __ and_(dst2, int_scratch, Operand(HeapNumber::kSignMask), SetCC);
// Get the absolute value of the object (as an unsigned integer).
__ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2);
- __ rsb(dst_mantissa, dst_mantissa, Operand(31));
+ __ CountLeadingZeros(dst1, int_scratch, scratch2);
+ __ rsb(dst1, dst1, Operand(31));
// Set the exponent.
- __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Bfi(dst_exponent, scratch2, scratch2,
+ __ add(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Bfi(dst2, scratch2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ mov(scratch2, Operand(1));
- __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa));
+ __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst1));
- __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ cmp(dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
// Get the number of bits to set in the lower part of the mantissa.
- __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord),
- SetCC);
+ __ sub(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
__ b(mi, &fewer_than_20_useful_bits);
// Set the higher 20 bits of the mantissa.
- __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2));
+ __ orr(dst2, dst2, Operand(int_scratch, LSR, scratch2));
__ rsb(scratch2, scratch2, Operand(32));
- __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2));
+ __ mov(dst1, Operand(int_scratch, LSL, scratch2));
__ b(&done);
__ bind(&fewer_than_20_useful_bits);
- __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ rsb(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
__ mov(scratch2, Operand(int_scratch, LSL, scratch2));
- __ orr(dst_exponent, dst_exponent, scratch2);
+ __ orr(dst2, dst2, scratch2);
// Set dst1 to 0.
- __ mov(dst_mantissa, Operand::Zero());
+ __ mov(dst1, Operand::Zero());
}
__ bind(&done);
}
@@ -805,8 +823,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Destination destination,
DwVfpRegister double_dst,
DwVfpRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -822,8 +840,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
__ b(&done);
__ bind(&obj_is_not_smi);
@@ -850,52 +868,26 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ b(ne, not_int32);
if (destination == kCoreRegisters) {
- __ vmov(dst_mantissa, dst_exponent, double_dst);
+ __ vmov(dst1, dst2, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
- // Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
+    // Load the double value in the destination registers.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
// Check for 0 and -0.
- Label zero;
- __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask));
- __ orr(scratch1, scratch1, Operand(dst_mantissa));
+ __ bic(scratch1, dst1, Operand(HeapNumber::kSignMask));
+ __ orr(scratch1, scratch1, Operand(dst2));
__ cmp(scratch1, Operand::Zero());
- __ b(eq, &zero);
+ __ b(eq, &done);
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Ldrd(dst_mantissa, dst_exponent,
- FieldMemOperand(object, HeapNumber::kValueOffset));
- __ b(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ b(not_int32);
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
}
__ bind(&done);
@@ -918,15 +910,14 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done, maybe_undefined;
+ Label done;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
@@ -973,28 +964,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ tst(scratch1, Operand(HeapNumber::kSignMask));
__ rsb(dst, dst, Operand::Zero(), LeaveCC, mi);
}
- __ b(&done);
-
- __ bind(&maybe_undefined);
- __ CompareRoot(object, Heap::kUndefinedValueRootIndex);
- __ b(ne, not_int32);
- // |undefined| is truncated to 0.
- __ mov(dst, Operand(Smi::FromInt(0)));
- // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
+ Register src1,
+ Register src2,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ubfx(scratch,
- src_exponent,
+ src1,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1014,11 +997,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ sub(tmp, scratch, Operand(src_exponent, LSR, 31));
+ __ sub(tmp, scratch, Operand(src1, LSR, 31));
__ cmp(tmp, Operand(30));
__ b(gt, not_int32);
// - Bits [21:0] in the mantissa are not null.
- __ tst(src_mantissa, Operand(0x3fffff));
+ __ tst(src2, Operand(0x3fffff));
__ b(ne, not_int32);
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1029,19 +1012,19 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ubfx(dst,
- src_mantissa,
+ src2,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
__ orr(dst,
dst,
- Operand(src_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ Operand(src1, LSL, HeapNumber::kNonMantissaBitsInTopWord));
// Create the mask and test the lower bits (of the higher bits).
__ rsb(scratch, scratch, Operand(32));
- __ mov(src_mantissa, Operand(1));
- __ mov(src_exponent, Operand(src_mantissa, LSL, scratch));
- __ sub(src_exponent, src_exponent, Operand(1));
- __ tst(dst, src_exponent);
+ __ mov(src2, Operand(1));
+ __ mov(src1, Operand(src2, LSL, scratch));
+ __ sub(src1, src1, Operand(1));
+ __ tst(dst, src1);
__ b(ne, not_int32);
}
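
The bit tests above verify, on the raw exponent and mantissa words, that the double is an exact 32-bit integer. Semantically the predicate is just a round-trip check; a sketch in ordinary arithmetic (the negative-zero exclusion mirrors the caller's separate 0/-0 handling):

#include <cmath>
#include <cstdint>

// True iff value is exactly representable as a signed 32-bit integer.
bool DoubleIs32BitInteger(double value) {
  if (!std::isfinite(value)) return false;
  if (value < -2147483648.0 || value > 2147483647.0) return false;
  int32_t as_int = static_cast<int32_t>(value);
  return static_cast<double>(as_int) == value &&
         !(as_int == 0 && std::signbit(value));
}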
@@ -1165,43 +1148,48 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cond) {
+ Condition cond,
+ bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
__ cmp(r0, r1);
__ b(ne, &not_identical);
- // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cond == lt || cond == gt) {
- __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, slow);
- } else {
- __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
- __ b(eq, &heap_number);
- // Comparing JS objects with <=, >= is complicated.
- if (cond != eq) {
- __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cond != eq || !never_nan_nan) {
+ // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cond == lt || cond == gt) {
+ __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, slow);
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cond == le || cond == ge) {
- __ cmp(r4, Operand(ODDBALL_TYPE));
- __ b(ne, &return_equal);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, r2);
- __ b(ne, &return_equal);
- if (cond == le) {
- // undefined <= undefined should fail.
- __ mov(r0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ mov(r0, Operand(LESS));
+ } else {
+ __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
+ __ b(eq, &heap_number);
+ // Comparing JS objects with <=, >= is complicated.
+ if (cond != eq) {
+ __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ __ b(ge, slow);
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cond == le || cond == ge) {
+ __ cmp(r4, Operand(ODDBALL_TYPE));
+ __ b(ne, &return_equal);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ __ cmp(r0, r2);
+ __ b(ne, &return_equal);
+ if (cond == le) {
+ // undefined <= undefined should fail.
+ __ mov(r0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ mov(r0, Operand(LESS));
+ }
+ __ Ret();
}
- __ Ret();
}
}
}
@@ -1216,45 +1204,47 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cond != lt && cond != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
- // NaNs have all-one exponents so they sign extend to -1.
- __ cmp(r3, Operand(-1));
- __ b(ne, &return_equal);
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
- // Or with all low-bits of mantissa.
- __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ orr(r0, r3, Operand(r2), SetCC);
- // For equal we already have the right value in r0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load r0 with the failing
- // value if it's a NaN.
- if (cond != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq);
- if (cond == le) {
- __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ if (cond != eq || !never_nan_nan) {
+ // For less and greater we don't have to check for NaN since the result of
+ // x < x is false regardless. For the others here is some code to check
+ // for NaN.
+ if (cond != lt && cond != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+ // NaNs have all-one exponents so they sign extend to -1.
+ __ cmp(r3, Operand(-1));
+ __ b(ne, &return_equal);
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+ // Or with all low-bits of mantissa.
+ __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ orr(r0, r3, Operand(r2), SetCC);
+ // For equal we already have the right value in r0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load r0 with the failing
+ // value if it's a NaN.
+ if (cond != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq);
+ if (cond == le) {
+ __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
+ __ Ret();
}
- __ Ret();
+ // No fall through here.
}
- // No fall through here.
__ bind(&not_identical);
}
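
The heap-number path above detects NaN straight from the IEEE-754 bit pattern: all eleven exponent bits set and a non-zero mantissa (an all-zero mantissa with that exponent is an infinity instead). The same test in portable C++:

#include <cstdint>
#include <cstring>

bool IsNaNBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // type-pun safely
  uint64_t exponent = (bits >> 52) & 0x7FF;
  uint64_t mantissa = bits & ((static_cast<uint64_t>(1) << 52) - 1);
  return exponent == 0x7FF && mantissa != 0;
}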
@@ -1688,60 +1678,42 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::HEAP_NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about symbol/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry r1 and r2 are the values to be compared.
+// On entry lhs_ and rhs_ are the values to be compared.
// On exit r0 is 0, positive or negative to indicate the result of
// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = r1;
- Register rhs = r0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles, lhs_not_nan;
- Label not_two_smis, smi_done;
- __ orr(r2, r1, r0);
- __ JumpIfNotSmi(r2, &not_two_smis);
- __ mov(r1, Operand(r1, ASR, 1));
- __ sub(r0, r1, Operand(r0, ASR, 1));
- __ Ret();
- __ bind(&not_two_smis);
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ orr(r2, r1, r0);
+ __ JumpIfNotSmi(r2, &not_two_smis);
+ __ mov(r1, Operand(r1, ASR, 1));
+ __ sub(r0, r1, Operand(r0, ASR, 1));
+ __ Ret();
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ orr(r2, r1, r0);
+ __ tst(r2, Operand(kSmiTagMask));
+ __ Assert(ne, "CompareStub: unexpected smi operands.");
+ }
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ and_(r2, lhs, Operand(rhs));
+ __ and_(r2, lhs_, Operand(rhs_));
__ JumpIfNotSmi(r2, &not_smis);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1752,7 +1724,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// comparison. If VFP3 is supported the double values of the numbers have
// been loaded into d7 and d6. Otherwise, the double values have been loaded
// into r0, r1, r2, and r3.
- EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_);
__ bind(&both_loaded_as_doubles);
// The arguments have been converted to doubles and stored in d6 and d7, if
@@ -1775,7 +1747,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// If one of the sides was a NaN then the v flag is set. Load r0 with
// whatever it takes to make the comparison fail, since comparisons with NaN
// always fail.
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
__ mov(r0, Operand(GREATER));
} else {
__ mov(r0, Operand(LESS));
@@ -1784,19 +1756,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds lhs_not_nan.
- EmitNanCheck(masm, &lhs_not_nan, cc);
+ EmitNanCheck(masm, &lhs_not_nan, cc_);
// Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the
// answer. Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ EmitTwoNonNanDoubleComparison(masm, cc_);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in rhs_ and lhs_.
- if (strict()) {
+ if (strict_) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
}
Label check_for_symbols;
@@ -1806,8 +1778,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case r2 will contain the type of rhs_. Never falls through.
EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
@@ -1815,31 +1787,31 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&check_for_symbols);
// In the strict case the EmitStrictTwoHeapObjectCompare already took care of
// symbols.
- if (cc == eq && !strict()) {
+ if (cc_ == eq && !strict_) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that r2 is the type of rhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, r2, r3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, r2, r3);
- if (cc == eq) {
+ if (cc_ == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
r2,
r3,
r4);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
r2,
r3,
r4,
@@ -1849,18 +1821,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
- __ Push(lhs, rhs);
+ __ Push(lhs_, rhs_);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
ncr = GREATER;
} else {
- ASSERT(cc == gt || cc == ge); // remaining cases
+ ASSERT(cc_ == gt || cc_ == ge); // remaining cases
ncr = LESS;
}
__ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1870,9 +1842,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
}
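
For the smi fast path at the top of this function: smis are 31-bit integers tagged with a zero low bit, so an arithmetic shift right by one untags them, and the sign of the difference of the untagged values is the comparison result. A sketch of what the ASR/sub sequence computes (assumes the usual arithmetic right shift on signed values):

#include <cstdint>

int32_t SmiTag(int32_t value) { return value << 1; }  // value must fit 31 bits

// Negative, zero, or positive; untagged operands fit in 31 bits, so the
// subtraction cannot overflow 32-bit arithmetic.
int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  return (lhs_tagged >> 1) - (rhs_tagged >> 1);
}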
@@ -2356,23 +2325,20 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(VFP2);
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(r1, r0);
__ mov(r2, Operand(Smi::FromInt(MinorKey())));
- __ push(r2);
+ __ mov(r1, Operand(Smi::FromInt(op_)));
+ __ mov(r0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(r2, r1, r0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
@@ -2383,8 +2349,59 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+}
+
+
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2394,7 +2411,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ add(right, left, Operand(right), SetCC); // Add optimistically.
__ Ret(vc);
@@ -2509,24 +2526,10 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
Register left = r1;
Register right = r0;
Register scratch1 = r7;
@@ -2538,17 +2541,11 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
Register heap_number_map = r6;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op) {
+ switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2558,44 +2555,25 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// depending on whether VFP3 is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(VFP2) &&
- op != Token::MOD ?
+ op_ != Token::MOD ?
FloatingPointHelper::kVFPRegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- // Load right operand to d7 or r2/r3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, d7, d8, r2, r3, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, d7, r2, r3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to d6 or r0/r1. This keeps r0/r1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, d6, d8, r0, r1, heap_number_map,
- scratch1, scratch2, s0, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, d6, r0, r1, heap_number_map,
- scratch1, scratch2, fail);
- }
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
}
// Calculate the result.
@@ -2604,7 +2582,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// d6: Left value
// d7: Right value
CpuFeatures::Scope scope(VFP2);
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ vadd(d5, d6, d7);
break;
@@ -2628,7 +2606,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
+ op_,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2669,7 +2647,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
}
Label result_not_a_smi;
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
__ orr(r2, r3, Operand(r2));
break;
@@ -2720,9 +2698,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
}
// r2: Answer as signed int32.
@@ -2737,7 +2714,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(VFP2);
__ vmov(s0, r2);
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ vcvt_f64_u32(d0, s0);
} else {
__ vcvt_f64_s32(d0, s0);
@@ -2762,14 +2739,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis are successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
Register left = r1;
@@ -2782,14 +2757,12 @@ void BinaryOpStub_GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+ GenerateSmiSmiOperation(masm);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
}
__ bind(&not_smis);
}
@@ -2801,14 +2774,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2816,14 +2789,23 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2852,7 +2834,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
Register left = r1;
Register right = r0;
@@ -2874,7 +2856,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ orr(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+ GenerateSmiSmiOperation(masm);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2884,15 +2866,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers r0 and r1 (right
// and left) are preserved for the runtime call.
@@ -2991,13 +2964,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
: BinaryOpIC::INT32)) {
// We are using vfp registers so r5 is available.
heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
__ sub(r0, heap_number_result, Operand(kHeapObjectTag));
__ vstr(d5, r0, HeapNumber::kValueOffset);
__ mov(r0, heap_number_result);
@@ -3016,13 +2988,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
// Load the left value from the value saved on the stack.
__ Pop(r1, r0);
@@ -3127,13 +3098,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = r5;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
@@ -3177,7 +3147,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3216,32 +3185,20 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
+ Label call_runtime, call_string_add_or_runtime;
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3249,7 +3206,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3285,20 +3241,61 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(r0) && !result.is(r1));
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3311,7 +3308,7 @@ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
__ mov(result, Operand(overwritable_operand));
__ bind(&allocated);
} else {
- ASSERT(mode == NO_OVERWRITE);
+ ASSERT(mode_ == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
@@ -4926,7 +4923,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// subject: Subject string
// regexp_data: RegExp data (FixedArray)
// r0: Instance type of subject string
- STATIC_ASSERT(4 == kOneByteStringTag);
+ STATIC_ASSERT(4 == kAsciiStringTag);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ and_(r0, r0, Operand(kStringEncodingMask));
@@ -5150,7 +5147,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ ldr(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ sub(subject,
subject,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -5428,6 +5425,48 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+void CompareStub::PrintName(StringStream* stream) {
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(r0) ? "_r0" : "_r1");
+ stream->Add(rhs_.is(r0) ? "_r0" : "_r1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
+}
+
+
+int CompareStub::MinorKey() {
+  // Encode the stub's parameters in a unique 16-bit value. To avoid duplicate
+ // stubs the never NaN NaN condition is only taken into account if the
+ // condition is equals.
+ ASSERT((static_cast<unsigned>(cc_) >> 28) < (1 << 12));
+ ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+ (lhs_.is(r1) && rhs_.is(r0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
+ | RegisterField::encode(lhs_.is(r0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
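MinorKey() above packs the stub parameters into disjoint bit ranges of a single integer using v8's BitField helpers. A self-contained sketch of the scheme; the field widths and layout here are illustrative, not the stub's actual ones:

#include <cstdint>

// Each field owns the bit range [shift, shift + size) of the key.
template <int shift, int size>
struct BitField {
  static uint32_t encode(uint32_t value) { return value << shift; }
  static uint32_t decode(uint32_t key) {
    return (key >> shift) & ((1u << size) - 1);
  }
};

typedef BitField<0, 4> ConditionField;   // hypothetical layout
typedef BitField<4, 1> StrictField;
typedef BitField<5, 1> NeverNanNanField;

uint32_t MakeMinorKey(uint32_t condition, bool strict, bool never_nan_nan) {
  return ConditionField::encode(condition) |
         StrictField::encode(strict ? 1 : 0) |
         NeverNanNanField::encode(never_nan_nan ? 1 : 0);
}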
// StringCharCodeAtGenerator
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -5877,7 +5916,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ ldrh(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ ldrh(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
__ cmp(chars, scratch);
__ b(eq, &found_in_symbol_table);
__ bind(&next_probe[i]);
@@ -5960,28 +5999,23 @@ void SubStringStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTag == 0);
STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
- // Arithmetic shift right by one un-smi-tags. In this case we rotate right
- // instead because we bail out on non-smi values: ROR and ASR are equivalent
- // for smis but they set the flags in a way that's easier to optimize.
- __ mov(r2, Operand(r2, ROR, 1), SetCC);
- __ mov(r3, Operand(r3, ROR, 1), SetCC, cc);
- // If either to or from had the smi tag bit set, then C is set now, and N
- // has the same value: we rotated by 1, so the bottom bit is now the top bit.
+ // I.e., arithmetic shift right by one un-smi-tags.
+ __ mov(r2, Operand(r2, ASR, 1), SetCC);
+ __ mov(r3, Operand(r3, ASR, 1), SetCC, cc);
+ // If either to or from had the smi tag bit set, then carry is set now.
+ __ b(cs, &runtime); // Either "from" or "to" is not a smi.
// We want to bailout to runtime here if From is negative. In that case, the
// next instruction is not executed and we fall through to bailing out to
- // runtime.
- // Executed if both r2 and r3 are untagged integers.
- __ sub(r2, r2, Operand(r3), SetCC, cc);
- // One of the above un-smis or the above SUB could have set N==1.
- __ b(mi, &runtime); // Either "from" or "to" is not an smi, or from > to.
+ // runtime. pl is the opposite of mi.
+ // Both r2 and r3 are untagged integers.
+ __ sub(r2, r2, Operand(r3), SetCC, pl);
+ __ b(mi, &runtime); // Fail if from > to.
// Make sure first argument is a string.
__ ldr(r0, MemOperand(sp, kStringOffset));
STATIC_ASSERT(kSmiTag == 0);
- // Do a JumpIfSmi, but fold its jump into the subsequent string test.
- __ tst(r0, Operand(kSmiTagMask));
- Condition is_string = masm->IsObjectStringType(r0, r1, ne);
- ASSERT(is_string == eq);
+ __ JumpIfSmi(r0, &runtime);
+ Condition is_string = masm->IsObjectStringType(r0, r1);
__ b(NegateCondition(is_string), &runtime);
// Short-cut for the case of trivial substring.
@@ -6052,7 +6086,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_slice);
@@ -6090,12 +6124,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ add(r5, r5, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
  // Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ tst(r1, Operand(kStringEncodingMask));
__ b(eq, &two_byte_sequential);
@@ -6105,13 +6139,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// Locate first character of substring to copy.
__ add(r5, r5, r3);
// Locate first character of result.
- __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r1, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// r0: result string
// r1: first character of result string
// r2: result string length
// r5: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(masm, r1, r5, r2, r3, r4, r6, r7, r9,
COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_r0);
@@ -6236,7 +6270,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ add(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ add(left, left, Operand(scratch1));
__ add(right, right, Operand(scratch1));
__ rsb(length, length, Operand::Zero());
@@ -6389,8 +6423,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ ldrb(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
- __ ldrb(r3, FieldMemOperand(r1, SeqOneByteString::kHeaderSize));
+ __ ldrb(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
+ __ ldrb(r3, FieldMemOperand(r1, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6409,7 +6443,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode)
__ mov(r6, Operand(2));
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ strh(r2, FieldMemOperand(r0, SeqOneByteString::kHeaderSize));
+ __ strh(r2, FieldMemOperand(r0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, r2, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
__ Ret();
@@ -6459,6 +6493,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ tst(r4, Operand(kAsciiDataHintMask));
__ tst(r5, Operand(kAsciiDataHintMask), ne);
__ b(ne, &ascii_data);
+ __ eor(r4, r4, Operand(r5));
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(r4, r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmp(r4, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ b(eq, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(r7, r6, r4, r5, &call_runtime);
@@ -6491,10 +6530,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r4, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r7,
r0,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &first_prepared);
@@ -6507,10 +6546,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ tst(r5, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ add(r1,
r1,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag),
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag),
LeaveCC,
eq);
__ b(eq, &second_prepared);
@@ -6533,7 +6572,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ b(eq, &non_ascii_string_add_flat_result);
__ AllocateAsciiString(r0, r6, r4, r5, r9, &call_runtime);
- __ add(r6, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(r6, r0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// r0: result string.
// r7: first character of first string.
// r1: first character of second string.
@@ -6624,7 +6663,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ orr(r2, r1, r0);
__ JumpIfNotSmi(r2, &miss);
@@ -6645,53 +6684,31 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBER);
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
+ __ and_(r2, r1, Operand(r0));
+ __ JumpIfSmi(r2, &generic_stub);
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(r0, &miss);
- }
+ __ CompareObjectType(r0, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &maybe_undefined1);
+ __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
+ __ b(ne, &maybe_undefined2);
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or VFP2 is unsupported.
+ // stub if NaN is involved or VFP3 is unsupported.
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatures::Scope scope(VFP2);
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(r0, &right_smi);
- __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ sub(r2, r0, Operand(kHeapObjectTag));
- __ vldr(d1, r2, HeapNumber::kValueOffset);
- __ b(&left);
- __ bind(&right_smi);
- __ SmiUntag(r2, r0); // Can't clobber r0 yet.
- SwVfpRegister single_scratch = d2.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d1, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(r1, &left_smi);
- __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
+ // Load left and right operand
__ sub(r2, r1, Operand(kHeapObjectTag));
__ vldr(d0, r2, HeapNumber::kValueOffset);
- __ b(&done);
- __ bind(&left_smi);
- __ SmiUntag(r2, r1); // Can't clobber r1 yet.
- single_scratch = d3.low();
- __ vmov(single_scratch, r2);
- __ vcvt_f64_s32(d0, single_scratch);
+ __ sub(r2, r0, Operand(kHeapObjectTag));
+ __ vldr(d1, r2, HeapNumber::kValueOffset);
- __ bind(&done);
- // Compare operands.
+ // Compare operands
__ VFPCompareAndSetFlags(d0, d1);
// Don't base result on status bits when a NaN is involved.
@@ -6705,16 +6722,14 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
__ b(ne, &miss);
- __ JumpIfSmi(r1, &unordered);
__ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE);
__ b(ne, &maybe_undefined2);
__ jmp(&unordered);
@@ -6732,7 +6747,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOL);
+ ASSERT(state_ == CompareIC::SYMBOLS);
Label miss;
// Registers containing left and right operands respectively.
@@ -6770,7 +6785,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6848,7 +6863,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ and_(r2, r1, Operand(r0));
__ JumpIfSmi(r2, &miss);
@@ -7381,7 +7396,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(r0));
__ Move(address, regs_.address());
__ Move(r0, regs_.object());
- __ Move(r1, address);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(r1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ ldr(r1, MemOperand(address, 0));
+ }
__ mov(r2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7539,7 +7559,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(r0, r3,
+ __ StoreNumberToDoubleElements(r0, r3, r1,
// Overwrites all regs after this.
r5, r6, r7, r9, r2,
&slow_elements);
@@ -7549,7 +7569,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
if (entry_hook_ != NULL) {
- PredictableCodeSizeScope predictable(masm, 4 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm);
ProfileEntryHookStub stub;
__ push(lr);
__ CallStub(&stub);
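
The restored InformIncrementalMarker distinguishes what goes in r1: incremental compaction needs the slot address so a moved object's pointer can be updated, while plain incremental marking wants the value loaded from the slot. A rough C++ model of the dispatch (the two callee names are stand-ins, not real runtime entry points):

    #include <cstdio>

    enum Mode { INCREMENTAL, INCREMENTAL_COMPACTION };

    void MarkObject(void* object, void* value) {   // stand-in
      printf("mark value %p reachable from %p\n", value, object);
    }
    void RecordSlot(void* object, void** slot) {   // stand-in
      printf("record slot %p of %p\n", (void*)slot, object);
    }

    void InformIncrementalMarker(Mode mode, void* object, void** slot) {
      if (mode == INCREMENTAL_COMPACTION) {
        RecordSlot(object, slot);    // r1 <- slot address
      } else {
        MarkObject(object, *slot);   // r1 <- ldr from the slot
      }
    }

    int main() {
      void* value = nullptr;
      void* holder[1] = {&value};
      InformIncrementalMarker(INCREMENTAL, holder, holder);
    }
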
diff --git a/deps/v8/src/arm/code-stubs-arm.h b/deps/v8/src/arm/code-stubs-arm.h
index 0443cf799..3e796249c 100644
--- a/deps/v8/src/arm/code-stubs-arm.h
+++ b/deps/v8/src/arm/code-stubs-arm.h
@@ -142,6 +142,108 @@ class UnaryOpStub: public CodeStub {
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ use_vfp2_ = CpuFeatures::IsSupported(VFP2);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_vfp2_(VFP2Bits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_vfp2_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class VFP2Bits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | VFP2Bits::encode(use_vfp2_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -622,6 +724,20 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
+ // Loads objects from r0 and r1 (right and left in binary operations) into
+  // floating point registers. Depending on the destination the values end up
+  // either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
+ // floating point registers VFP3 must be supported. If core registers are
+ // requested when VFP3 is supported d6 and d7 will still be scratched. If
+ // either r0 or r1 is not a number (not smi and not heap number object) the
+ // not_number label is jumped to with r0 and r1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -720,12 +836,7 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kVFPRegisters, then VFP3
- // must be supported. If kCoreRegisters are requested and VFP3 is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
+ private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
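
The minor-key layout comment "RRRTTTVOOOOOOOMM" in the restored BinaryOpStub reads right to left: 2 mode bits, 7 op bits, 1 VFP2 bit, then two 3-bit type fields, all packed with V8's BitField template. A self-contained sketch of that packing (simplified template, int payloads):

    #include <cstdint>
    #include <cstdio>

    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 2>  ModeBits;   // MM
    typedef BitField<int, 2, 7>  OpBits;     // OOOOOOO
    typedef BitField<bool, 9, 1> VFP2Bits;   // V

    int main() {
      uint32_t key =
          OpBits::encode(42) | ModeBits::encode(1) | VFP2Bits::encode(true);
      printf("op=%d mode=%d vfp2=%d\n", OpBits::decode(key),
             ModeBits::decode(key), (int)VFP2Bits::decode(key));
    }
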
diff --git a/deps/v8/src/arm/codegen-arm.cc b/deps/v8/src/arm/codegen-arm.cc
index bb771b18e..09166c3c0 100644
--- a/deps/v8/src/arm/codegen-arm.cc
+++ b/deps/v8/src/arm/codegen-arm.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
-#include "simulator-arm.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,74 +49,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_arm_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_arm_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(VFP2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_vfp(VFP2);
- DoubleRegister input = d0;
- DoubleRegister result = d1;
- DoubleRegister double_scratch1 = d2;
- DoubleRegister double_scratch2 = d3;
- Register temp1 = r4;
- Register temp2 = r5;
- Register temp3 = r6;
-
- if (masm.use_eabi_hardfloat()) {
- // Input value is in d0 anyway, nothing to do.
- } else {
- __ vmov(input, r0, r1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (masm.use_eabi_hardfloat()) {
- __ vmov(d0, result);
- } else {
- __ vmov(r0, r1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_arm_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -141,8 +73,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-#define __ ACCESS_MASM(masm)
-
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -262,7 +192,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
r3,
r9,
- kLRHasNotBeenSaved,
+ kLRHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -486,7 +416,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ b(ne, &external_string);
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ add(string,
string,
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
@@ -520,188 +450,8 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ tst(index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index");
- __ tst(value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value");
-
- __ ldr(ip, FieldMemOperand(string, String::kLengthOffset));
- __ cmp(index, ip);
- __ Check(lt, "Index is too large");
-
- __ cmp(index, Operand(Smi::FromInt(0)));
- __ Check(ge, "Index is negative");
-
- __ ldr(ip, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
-
- __ and_(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(ip, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type");
- }
-
- __ add(ip,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value, value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- // Smis are tagged by left shift by 1, thus LSR by 1 to smi-untag inline.
- __ strb(value, MemOperand(ip, index, LSR, 1));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ strh(value, MemOperand(ip, index));
- }
-}
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ vldr(double_scratch1, ExpConstant(0, temp3));
- __ vmov(result, kDoubleRegZero);
- __ VFPCompareAndSetFlags(double_scratch1, input);
- __ b(ge, &done);
- __ vldr(double_scratch2, ExpConstant(1, temp3));
- __ VFPCompareAndSetFlags(input, double_scratch2);
- __ vldr(result, ExpConstant(2, temp3));
- __ b(ge, &done);
- __ vldr(double_scratch1, ExpConstant(3, temp3));
- __ vldr(result, ExpConstant(4, temp3));
- __ vmul(double_scratch1, double_scratch1, input);
- __ vadd(double_scratch1, double_scratch1, result);
- __ vmov(temp2, temp1, double_scratch1);
- __ vsub(double_scratch1, double_scratch1, result);
- __ vldr(result, ExpConstant(6, temp3));
- __ vldr(double_scratch2, ExpConstant(5, temp3));
- __ vmul(double_scratch1, double_scratch1, double_scratch2);
- __ vsub(double_scratch1, double_scratch1, input);
- __ vsub(result, result, double_scratch1);
- __ vmul(input, double_scratch1, double_scratch1);
- __ vmul(result, result, input);
- __ mov(temp1, Operand(temp2, LSR, 11));
- __ vldr(double_scratch2, ExpConstant(7, temp3));
- __ vmul(result, result, double_scratch2);
- __ vsub(result, result, double_scratch1);
- __ vldr(double_scratch2, ExpConstant(8, temp3));
- __ vadd(result, result, double_scratch2);
- __ movw(ip, 0x7ff);
- __ and_(temp2, temp2, Operand(ip));
- __ add(temp1, temp1, Operand(0x3ff));
- __ mov(temp1, Operand(temp1, LSL, 20));
-
- // Must not call ExpConstant() after overwriting temp3!
- __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ ldr(ip, MemOperand(temp3, temp2, LSL, 3));
- __ add(temp3, temp3, Operand(kPointerSize));
- __ ldr(temp2, MemOperand(temp3, temp2, LSL, 3));
- __ orr(temp1, temp1, temp2);
- __ vmov(input, ip, temp1);
- __ vmul(result, result, input);
- __ bind(&done);
-}
-
#undef __
-// add(r0, pc, Operand(-8))
-static const uint32_t kCodeAgePatchFirstInstruction = 0xe24f0008;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- PredictableCodeSizeScope scope(patcher.masm(), *length);
- patcher.masm()->stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- patcher.masm()->LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- patcher.masm()->add(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- patcher.masm()->add(r0, pc, Operand(-8));
- patcher.masm()->ldr(pc, MemOperand(pc, -4));
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM
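
The removed CreateExpFunction/EmitMathExp implemented a table-driven exp() approximation. Its least obvious step is "add(temp1, temp1, Operand(0x3ff))" followed by "mov(temp1, Operand(temp1, LSL, 20))": it manufactures the double 2^n by writing the biased exponent straight into the high word (the shift is 20 rather than 52 because temp1 only becomes the upper 32 bits). A host-side sketch of that step, assuming 2^n is a normal double:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    double PowerOfTwo(int n) {  // |n| small enough to stay a normal double
      uint64_t bits = (static_cast<uint64_t>(n) + 1023) << 52;  // biased exp
      double result;
      std::memcpy(&result, &bits, sizeof result);
      return result;
    }

    int main() {
      printf("%g %g %g\n", PowerOfTwo(0), PowerOfTwo(11), PowerOfTwo(-3));
      // 1 2048 0.125
    }
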
diff --git a/deps/v8/src/arm/codegen-arm.h b/deps/v8/src/arm/codegen-arm.h
index 8f0033e2c..c340e6b10 100644
--- a/deps/v8/src/arm/codegen-arm.h
+++ b/deps/v8/src/arm/codegen-arm.h
@@ -88,22 +88,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_H_
diff --git a/deps/v8/src/arm/constants-arm.h b/deps/v8/src/arm/constants-arm.h
index a569383f2..4fa49e3d3 100644
--- a/deps/v8/src/arm/constants-arm.h
+++ b/deps/v8/src/arm/constants-arm.h
@@ -84,18 +84,9 @@ namespace v8 {
namespace internal {
// Constant pool marker.
-// Use UDF, the permanently undefined instruction.
-const int kConstantPoolMarkerMask = 0xfff000f0;
-const int kConstantPoolMarker = 0xe7f000f0;
-const int kConstantPoolLengthMaxMask = 0xffff;
-inline int EncodeConstantPoolLength(int length) {
- ASSERT((length & kConstantPoolLengthMaxMask) == length);
- return ((length & 0xfff0) << 4) | (length & 0xf);
-}
-inline int DecodeConstantPoolLength(int instr) {
- ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
- return ((instr >> 4) & 0xfff0) | (instr & 0xf);
-}
+const int kConstantPoolMarkerMask = 0xffe00000;
+const int kConstantPoolMarker = 0x0c000000;
+const int kConstantPoolLengthMask = 0x001ffff;
// Number of registers in normal ARM mode.
const int kNumRegisters = 16;
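
Both marker schemes in this hunk tag the inline constant pool with a recognizable word and smuggle the pool length into it. The newer, removed scheme is built on UDF, a permanently undefined instruction, and splits a 16-bit length around the opcode's fixed 0xf0 nibble; the restored scheme reserves the low bits of the 0x0c000000 word. A sketch of the removed encode/decode pair (uint32_t used here to avoid signed hex literals):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    const uint32_t kConstantPoolMarker = 0xe7f000f0;  // UDF encoding

    uint32_t EncodeConstantPoolLength(uint32_t length) {
      assert((length & 0xffff) == length);
      return ((length & 0xfff0) << 4) | (length & 0xf);  // skip the f0 nibble
    }

    uint32_t DecodeConstantPoolLength(uint32_t instr) {
      return ((instr >> 4) & 0xfff0) | (instr & 0xf);
    }

    int main() {
      uint32_t marker = kConstantPoolMarker | EncodeConstantPoolLength(300);
      printf("%u\n", DecodeConstantPoolLength(marker));  // 300
    }
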
diff --git a/deps/v8/src/arm/deoptimizer-arm.cc b/deps/v8/src/arm/deoptimizer-arm.cc
index ee2a581a5..19667b9d5 100644
--- a/deps/v8/src/arm/deoptimizer-arm.cc
+++ b/deps/v8/src/arm/deoptimizer-arm.cc
@@ -114,6 +114,7 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
}
+static const int32_t kBranchBeforeStackCheck = 0x2a000001;
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
@@ -122,21 +123,24 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // The back edge bookkeeping code matches the pattern:
- //
- // <decrement profiling counter>
- // 2a 00 00 01 bpl ok
+ // The call of the stack guard check has the following form:
+ // e1 5d 00 0c cmp sp, <limit>
+ // 2a 00 00 01 bcs ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// We patch the code to the following form:
- //
- // <decrement profiling counter>
+ // e1 5d 00 0c cmp sp, <limit>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
@@ -173,9 +177,15 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
- patcher.masm()->b(+16, pl);
- ASSERT_EQ(kBranchBeforeInterrupt,
- Memory::int32_at(pc_after - 3 * kInstrSize));
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->b(+16, pl);
+ ASSERT_EQ(kBranchBeforeInterrupt,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ } else {
+ patcher.masm()->b(+4, cs);
+ ASSERT_EQ(kBranchBeforeStackCheck,
+ Memory::int32_at(pc_after - 3 * kInstrSize));
+ }
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
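
The magic words asserted on above decode as conditional branches: ARM packs a 4-bit condition and a signed 24-bit word offset (taken from the branch address plus 8) into each B instruction, which is what lets the deoptimizer recognize the guard and swap it for a NOP. A small decoding sketch:

    #include <cstdint>
    #include <cstdio>

    // cond in bits 31-28, signed imm24 in bits 23-0;
    // target = branch address + 8 + 4 * imm24.
    void PrintBranch(uint32_t instr) {
      unsigned cond = instr >> 28;
      int32_t imm24 = static_cast<int32_t>(instr << 8) >> 8;  // sign-extend
      printf("cond=0x%x target=branch+%d bytes\n", cond, 8 + 4 * imm24);
    }

    int main() {
      PrintBranch(0x2a000001);  // cond 0x2 = CS: the "bcs ok" stack check
      PrintBranch(0x5a000004);  // cond 0x5 = PL: the branch before interrupt
    }
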
diff --git a/deps/v8/src/arm/disasm-arm.cc b/deps/v8/src/arm/disasm-arm.cc
index cb0a6cb5c..3c94a46e6 100644
--- a/deps/v8/src/arm/disasm-arm.cc
+++ b/deps/v8/src/arm/disasm-arm.cc
@@ -1098,7 +1098,6 @@ int Decoder::DecodeType7(Instruction* instr) {
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
-// Dd = vmla(Dn, Dm)
// Dd = vdiv(Dn, Dm)
// vcmp(Dd, Dm)
// vmrs
@@ -1161,12 +1160,6 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
} else {
Unknown(instr); // Not used by V8.
}
- } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
- if (instr->SzValue() == 0x1) {
- Format(instr, "vmla.f64'cond 'Dd, 'Dn, 'Dm");
- } else {
- Unknown(instr); // Not used by V8.
- }
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
if (instr->SzValue() == 0x1) {
Format(instr, "vdiv.f64'cond 'Dd, 'Dn, 'Dm");
@@ -1395,7 +1388,7 @@ bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
- return DecodeConstantPoolLength(instruction_bits);
+ return instruction_bits & kConstantPoolLengthMask;
} else {
return -1;
}
@@ -1417,7 +1410,8 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
- DecodeConstantPoolLength(instruction_bits));
+ instruction_bits &
+ kConstantPoolLengthMask);
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
diff --git a/deps/v8/src/arm/full-codegen-arm.cc b/deps/v8/src/arm/full-codegen-arm.cc
index 3b560fedf..be8228377 100644
--- a/deps/v8/src/arm/full-codegen-arm.cc
+++ b/deps/v8/src/arm/full-codegen-arm.cc
@@ -130,7 +130,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -164,19 +164,14 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
- info->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ Push(lr, fp, cp, r1);
+ if (locals_count > 0) {
// Load undefined value here, so the value is ready for the loop
// below.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
}
+ // Adjust fp to point to caller's fp.
+ __ add(fp, sp, Operand(2 * kPointerSize));
{ Comment cmnt(masm_, "[ Allocate locals");
for (int i = 0; i < locals_count; i++) {
@@ -292,7 +287,7 @@ void FullCodeGenerator::Generate() {
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
__ cmp(sp, Operand(ip));
__ b(hs, &ok);
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_);
StackCheckStub stub;
__ CallStub(&stub);
__ bind(&ok);
@@ -347,31 +342,42 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Stack check");
// Block literal pools whilst emitting stack check code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ b(pl, &ok);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+ __ cmp(sp, Operand(ip));
+ __ b(hs, &ok);
+ PredictableCodeSizeScope predictable(masm_);
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- InterruptStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ RecordStackCheck(stmt->OsrEntryId());
- EmitProfilingCounterReset();
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
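
With count-based interrupts, each back edge decrements the profiling counter by a weight proportional to the loop body size, so big loops reach the InterruptStub sooner. The clamp used above in isolation (both constants are assumed values for illustration; the real ones are defined alongside the full code generator):

    #include <algorithm>
    #include <cstdio>

    int BackEdgeWeight(int distance) {  // bytes of code since the loop head
      const int kMaxBackEdgeWeight = 127;     // assumed value
      const int kBackEdgeDistanceUnit = 142;  // assumed ARM value
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kBackEdgeDistanceUnit));
    }

    int main() {
      printf("%d %d %d\n", BackEdgeWeight(40), BackEdgeWeight(1500),
             BackEdgeWeight(1000000));  // 1 10 127
    }
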
@@ -433,8 +439,7 @@ void FullCodeGenerator::EmitReturnSequence() {
// tool from instrumenting as we rely on the code size here.
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
+ PredictableCodeSizeScope predictable(masm_);
__ RecordJSReturn();
masm_->mov(sp, fp);
masm_->ldm(ia_w, sp, fp.bit() | lr.bit());
@@ -909,33 +914,34 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());
- // Load instance object.
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }
- // Assign it.
- __ str(r1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- r1,
- r3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ mov(r1, Operand(instance));
+ __ str(r1, ContextOperand(cp, variable->index()));
+ Visit(declaration->module());
+ break;
+ }
- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}
@@ -978,14 +984,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1240,7 +1238,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ add(r0, r0, Operand(Smi::FromInt(1)));
__ push(r0);
- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ b(&loop);
// Remove the pointers stored on the stack.
@@ -1393,9 +1391,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
if (local->mode() == CONST) {
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
@@ -2376,7 +2374,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -3131,39 +3129,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(r2);
- __ pop(r1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, r0, r1, r2);
- context()->Plug(r0);
-}
-
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
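
The deleted EmitOneByteSeqStringSetChar/EmitTwoByteSeqStringSetChar intrinsics (whose code generator was also removed from codegen-arm.cc above) reduce to a single store whose width follows the string encoding: strb for one-byte strings, strh for two-byte. A plain C++ analogue of the dispatch:

    #include <cstdint>
    #include <cstdio>

    enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };

    void SeqStringSetChar(void* chars, Encoding encoding, int index,
                          uint16_t value) {
      if (encoding == ONE_BYTE_ENCODING) {
        static_cast<uint8_t*>(chars)[index] = static_cast<uint8_t>(value);  // strb
      } else {
        static_cast<uint16_t*>(chars)[index] = value;                       // strh
      }
    }

    int main() {
      char buf[] = "abc";
      SeqStringSetChar(buf, ONE_BYTE_ENCODING, 1, 'X');
      printf("%s\n", buf);  // aXc
    }
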
@@ -3656,7 +3621,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ ldr(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ add(string_length, string_length, Operand(scratch1), SetCC);
__ b(vs, &bailout);
__ cmp(element, elements_end);
@@ -3685,7 +3650,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not
// smi but the other values are, so the result is a smi
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ sub(string_length, string_length, Operand(scratch1));
__ smull(scratch2, ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
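
The overflow test this truncated comment describes: smull leaves the 64-bit product split across two registers, and the product fits the 32-bit range exactly when the high word equals the low word shifted arithmetically right by 31 (the "cmp ip, Operand(scratch2, ASR, 31)" idiom). Equivalent host-side check:

    #include <cstdint>
    #include <cstdio>

    bool Fits32AfterMul(int32_t a, int32_t b) {
      int64_t product = static_cast<int64_t>(a) * b;  // what smull computes
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      return hi == (lo >> 31);  // high word must mirror the low word's sign
    }

    int main() {
      printf("%d %d\n", Fits32AfterMul(1000, 1000),      // 1: fits
             Fits32AfterMul(1 << 20, 1 << 20));          // 0: overflows
    }
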
@@ -3723,10 +3688,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ add(result_pos,
result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ ldr(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ cmp(scratch1, Operand(Smi::FromInt(1)));
__ b(eq, &one_char_separator);
__ b(gt, &long_separator);
@@ -3742,9 +3707,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &empty_separator_loop); // End while (element < elements_end).
@@ -3754,7 +3717,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ __ ldrb(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
__ jmp(&one_char_separator_loop_entry);
@@ -3774,9 +3737,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &one_char_separator_loop); // End while (element < elements_end).
@@ -3797,16 +3758,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ add(string,
separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
__ ldr(string, MemOperand(element, kPointerSize, PostIndex));
__ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(string, string, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
__ b(lt, &long_separator_loop); // End while (element < elements_end).
@@ -4111,8 +4070,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// Call stub. Undo operation first.
__ sub(r0, r0, Operand(Smi::FromInt(count_value)));
}
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(count_value)));
+ __ mov(r1, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
@@ -4337,7 +4295,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cond = CompareIC::ComputeCondition(op);
+ Condition cond = eq;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cond = eq;
+ break;
+ case Token::LT:
+ cond = lt;
+ break;
+ case Token::GT:
+ cond = gt;
+ break;
+ case Token::LTE:
+ cond = le;
+ break;
+ case Token::GTE:
+ cond = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ pop(r1);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/deps/v8/src/arm/ic-arm.cc b/deps/v8/src/arm/ic-arm.cc
index 29a3687aa..48395897d 100644
--- a/deps/v8/src/arm/ic-arm.cc
+++ b/deps/v8/src/arm/ic-arm.cc
@@ -1379,6 +1379,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
+ receiver,
elements, // Overwritten.
r3, // Scratch regs...
r4,
@@ -1698,15 +1699,36 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address cmp_instruction_address =
- Assembler::return_address_from_call_start(address);
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, r1, r0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
- // If the instruction following the call is not a cmp rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(cmp_instruction_address);
- return Assembler::IsCmpImmediate(instr);
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
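
UpdateCaches as restored here is the compare IC's rewrite loop: on a miss it derives a new state from the previous one and the operands, compiles a stub specialized for that state (or the generic CompareStub), and patches it in as the call target. A deliberately simplified model of the transitions, not the real TargetState rules:

    #include <cstdio>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, GENERIC };

    // Toy transition function: monotonic, always ending in GENERIC.
    State TargetState(State previous, bool both_smi, bool both_number) {
      if (previous == UNINITIALIZED && both_smi) return SMIS;
      if (previous <= SMIS && both_number) return HEAP_NUMBERS;
      return GENERIC;
    }

    int main() {
      State state = UNINITIALIZED;
      state = TargetState(state, true, true);    // SMIS:         1 == 2
      state = TargetState(state, false, true);   // HEAP_NUMBERS: 1.5 == 2
      state = TargetState(state, false, false);  // GENERIC:      "a" == {}
      printf("final state: %d\n", state);        // 3
    }
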
diff --git a/deps/v8/src/arm/lithium-arm.cc b/deps/v8/src/arm/lithium-arm.cc
index 420367373..21c549f17 100644
--- a/deps/v8/src/arm/lithium-arm.cc
+++ b/deps/v8/src/arm/lithium-arm.cc
@@ -177,7 +177,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
case Token::SHL: return "shl-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -297,11 +296,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -378,27 +372,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
+ stream->Add("] <- ");
value()->PrintTo(stream);
}
@@ -1046,15 +1033,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
@@ -1130,11 +1108,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1333,21 +1306,8 @@ LInstruction* LChunkBuilder::DoMul(HMul* instr) {
return DefineAsRegister(mul);
} else if (instr->representation().IsDouble()) {
- if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
- HAdd* add = HAdd::cast(instr->uses().value());
- if (instr == add->left()) {
- // This mul is the lhs of an add. The add and mul will be folded
- // into a multiply-add.
- return NULL;
- }
- if (instr == add->right() && !add->left()->IsMul()) {
- // This mul is the rhs of an add, where the lhs is not another mul.
- // The add and mul will be folded into a multiply-add.
- return NULL;
- }
- }
-
return DoArithmeticD(Token::MUL, instr);
+
} else {
return DoArithmeticT(Token::MUL, instr);
}
@@ -1358,12 +1318,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
-
- if (instr->left()->IsConstant()) {
- // If lhs is constant, do reverse subtraction instead.
- return DoRSub(instr);
- }
-
LOperand* left = UseRegisterAtStart(instr->left());
LOperand* right = UseOrConstantAtStart(instr->right());
LSubI* sub = new(zone()) LSubI(left, right);
@@ -1380,32 +1334,6 @@ LInstruction* LChunkBuilder::DoSub(HSub* instr) {
}
-LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
- ASSERT(instr->representation().IsInteger32());
- ASSERT(instr->left()->representation().IsInteger32());
- ASSERT(instr->right()->representation().IsInteger32());
-
- // Note: The lhs of the subtraction becomes the rhs of the
- // reverse-subtraction.
- LOperand* left = UseRegisterAtStart(instr->right());
- LOperand* right = UseOrConstantAtStart(instr->left());
- LRSubI* rsb = new(zone()) LRSubI(left, right);
- LInstruction* result = DefineAsRegister(rsb);
- if (instr->CheckFlag(HValue::kCanOverflow)) {
- result = AssignEnvironment(result);
- }
- return result;
-}
-
-
-LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
- LOperand* multiplier_op = UseRegisterAtStart(mul->left());
- LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
- LOperand* addend_op = UseRegisterAtStart(addend);
- return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
- multiplicand_op));
-}
-
LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
if (instr->representation().IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
@@ -1419,14 +1347,6 @@ LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
}
return result;
} else if (instr->representation().IsDouble()) {
- if (instr->left()->IsMul())
- return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
-
- if (instr->right()->IsMul()) {
- ASSERT(!instr->left()->IsMul());
- return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
- }
-
return DoArithmeticD(Token::ADD, instr);
} else {
ASSERT(instr->representation().IsTagged());
@@ -1492,7 +1412,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1646,16 +1566,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1779,10 +1689,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
+ LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
+ LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(result);
}
@@ -1950,40 +1860,53 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
+}
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
- DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
  // An unsigned int array load might overflow and cause a deopt; make sure it
  // has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
@@ -1997,48 +1920,66 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
- return new(zone()) LStoreKeyed(object, key, val);
- }
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ LOperand* key = UseRegisterOrConstant(instr->key());
+
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -2261,7 +2202,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
+ for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
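
Of the lithium changes reverted in this file, the multiply-add fold is the subtlest: DoMul returned NULL for a double multiply whose only use is an add, letting DoAdd later emit one fused LMultiplyAddD (ARM vmla) for the pair. The removed decision logic in isolation, with toy structs standing in for hydrogen nodes:

    #include <cstdio>

    struct HValue {
      char op;           // '*' for HMul, '+' for HAdd, anything else otherwise
      HValue* left;
      HValue* right;
      HValue* only_use;  // valid when use_count == 1
      int use_count;
    };

    // Mirrors the removed check in DoMul: fold when the multiply feeds an
    // add either as lhs, or as rhs while the lhs is not itself a multiply.
    bool FoldsIntoMultiplyAdd(HValue* mul) {
      if (mul->use_count != 1) return false;
      HValue* add = mul->only_use;
      if (add == nullptr || add->op != '+') return false;
      if (mul == add->left) return true;
      return mul == add->right && add->left->op != '*';
    }

    int main() {
      HValue mul = {'*', nullptr, nullptr, nullptr, 1};
      HValue x   = {'x', nullptr, nullptr, nullptr, 1};
      HValue add = {'+', &x, &mul, nullptr, 1};
      mul.only_use = &add;
      printf("%d\n", FoldsIntoMultiplyAdd(&mul));  // 1: becomes vmla
    }
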
diff --git a/deps/v8/src/arm/lithium-arm.h b/deps/v8/src/arm/lithium-arm.h
index 7397b4bc8..fb36fe9c0 100644
--- a/deps/v8/src/arm/lithium-arm.h
+++ b/deps/v8/src/arm/lithium-arm.h
@@ -125,18 +125,18 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
- V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
- V(MultiplyAddD) \
V(NumberTagD) \
V(NumberTagI) \
V(NumberTagU) \
@@ -150,7 +150,6 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
- V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -158,8 +157,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -168,7 +169,6 @@ class LCodeGen;
V(StringCompareAndBranch) \
V(StringLength) \
V(SubI) \
- V(RSubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
@@ -625,24 +625,6 @@ class LMulI: public LTemplateInstruction<1, 2, 1> {
};
-// Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD: public LTemplateInstruction<1, 3, 0> {
- public:
- LMultiplyAddD(LOperand* addend, LOperand* multiplier,
- LOperand* multiplicand) {
- inputs_[0] = addend;
- inputs_[1] = multiplier;
- inputs_[2] = multiplicand;
- }
-
- LOperand* addend() { return inputs_[0]; }
- LOperand* multiplier() { return inputs_[1]; }
- LOperand* multiplicand() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
-};
-
-
class LCmpIDAndBranch: public LControlInstruction<2, 0> {
public:
LCmpIDAndBranch(LOperand* left, LOperand* right) {
@@ -658,7 +640,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->representation().IsDouble();
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -683,30 +665,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1031,21 +989,6 @@ class LSubI: public LTemplateInstruction<1, 2, 0> {
};
-class LRSubI: public LTemplateInstruction<1, 2, 0> {
- public:
- LRSubI(LOperand* left, LOperand* right) {
- inputs_[0] = left;
- inputs_[1] = right;
- }
-
- LOperand* left() { return inputs_[0]; }
- LOperand* right() { return inputs_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
- DECLARE_HYDROGEN_ACCESSOR(Sub)
-};
-
-
class LConstantI: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
@@ -1199,30 +1142,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1438,26 +1357,58 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
}
- bool is_external() const {
- return hydrogen()->is_external();
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
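
Every class here derives from LTemplateInstruction<R, I, T>, whose template arguments fix the number of results, inputs, and temps as compile-time array sizes. A self-contained sketch of that shape (an assumed layout, not the real base class):

    #include <cstddef>

    template <int R, int I, int T>
    struct TemplateInstruction {
      void* results_[R ? R : 1];  // avoid zero-length arrays
      void* inputs_[I ? I : 1];
      void* temps_[T ? T : 1];
    };

    int main() {
      // LLoadKeyedSpecializedArrayElement above is the <1, 2, 0> shape:
      // one result, two inputs (external pointer and key), no temps.
      TemplateInstruction<1, 2, 0> load;
      static_assert(sizeof(load.inputs_) == 2 * sizeof(void*),
                    "two input slots");
      (void)load;
      return 0;
    }
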
@@ -1971,28 +1922,51 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
+ LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -2016,6 +1990,28 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2138,7 +2134,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@@ -2482,9 +2478,6 @@ class LChunkBuilder BASE_EMBEDDED {
HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
- LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
- LInstruction* DoRSub(HSub* instr);
-
static bool HasMagicNumberForDivisor(int32_t divisor);
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.cc b/deps/v8/src/arm/lithium-codegen-arm.cc
index 06b021669..6f5aa436a 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.cc
+++ b/deps/v8/src/arm/lithium-codegen-arm.cc
@@ -146,20 +146,8 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
-
- info()->set_prologue_offset(masm_->pc_offset());
- {
- PredictableCodeSizeScope predictible_code_size_scope(
- masm_, kNoCodeAgeSequenceLength * Assembler::kInstrSize);
- // The following three instructions must remain together and unmodified
- // for code aging to work properly.
- __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- // Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(2 * kPointerSize));
- }
+ __ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
+ __ add(fp, sp, Operand(2 * kPointerSize)); // Adjust FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
int slots = GetStackSlotCount();
@@ -234,30 +222,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
instr->CompileToNative(this);
}
}
@@ -1324,18 +1289,6 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
-void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
- DwVfpRegister addend = ToDoubleRegister(instr->addend());
- DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier());
- DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
-
- // This is computed in-place.
- ASSERT(addend.is(ToDoubleRegister(instr->result())));
-
- __ vmla(addend, multiplier, multiplicand);
-}
-
-
void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
const Register result = ToRegister(instr->result());
const Register left = ToRegister(instr->left());
@@ -1536,9 +1489,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// Mask the right_op operand.
__ and_(scratch, ToRegister(right_op), Operand(0x1F));
switch (instr->op()) {
- case Token::ROR:
- __ mov(result, Operand(left, ROR, scratch));
- break;
case Token::SAR:
__ mov(result, Operand(left, ASR, scratch));
break;
@@ -1562,13 +1512,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ mov(result, Operand(left, ROR, shift_count));
- } else {
- __ Move(result, left);
- }
- break;
case Token::SAR:
if (shift_count != 0) {
__ mov(result, Operand(left, ASR, shift_count));
@@ -1623,27 +1566,6 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
-void LCodeGen::DoRSubI(LRSubI* instr) {
- LOperand* left = instr->left();
- LOperand* right = instr->right();
- LOperand* result = instr->result();
- bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
- SBit set_cond = can_overflow ? SetCC : LeaveCC;
-
- if (right->IsStackSlot() || right->IsArgument()) {
- Register right_reg = EmitLoadRegister(right, ip);
- __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
- } else {
- ASSERT(right->IsRegister() || right->IsConstantOperand());
- __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
- }
-
- if (can_overflow) {
- DeoptimizeIf(vs, instr->environment());
- }
-}
-
-
void LCodeGen::DoConstantI(LConstantI* instr) {
ASSERT(instr->result()->IsRegister());
__ mov(ToRegister(instr->result()), Operand(instr->value()));
@@ -1764,15 +1686,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -2560,7 +2473,7 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
// We use Factory::the_hole_value() on purpose instead of loading from the
// root array to force relocation to be able to later patch with
// the cached map.
- PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_);
Handle<JSGlobalPropertyCell> cell =
factory()->NewJSGlobalPropertyCell(factory()->the_hole_value());
__ mov(ip, Operand(Handle<Object>(cell)));
@@ -2624,7 +2537,7 @@ void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
static const int kAdditionalDelta = 5;
  // Make sure that code size is predictable, since we use specific constant
  // offsets in the code to find embedded values.
- PredictableCodeSizeScope predictable(masm_, 6 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_);
int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
Label before_push_delta;
__ bind(&before_push_delta);
@@ -3005,87 +2918,50 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
} else {
- key = ToRegister(instr->key());
+ Register key = EmitLoadRegister(instr->key(), scratch0());
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
}
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+ __ ldr(result, FieldMemOperand(store_base, offset));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- CpuFeatures::Scope scope(VFP3);
- DwVfpRegister result = ToDoubleRegister(instr->result());
- Operand operand = key_is_constant
- ? Operand(constant_key << element_size_shift)
- : Operand(key, LSL, shift_size);
- __ add(scratch0(), external_pointer, operand);
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ vldr(result.low(), scratch0(), additional_offset);
- __ vcvt_f64_f32(result, result.low());
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ vldr(result, scratch0(), additional_offset);
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ ldrsb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ ldrb(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ ldrsh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ ldrh(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ ldr(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ tst(result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment());
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ __ cmp(result, scratch);
+ DeoptimizeIf(eq, instr->environment());
}
}
}
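
The tagged-key branch works because a smi on 32-bit V8 stores the integer shifted left by kSmiTagSize (1), so a tagged key needs only the remaining kPointerSizeLog2 - kSmiTagSize shift to become a byte offset. A standalone check of that arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kPointerSizeLog2 = 2;  // 4-byte pointers on 32-bit ARM
      const int kSmiTagSize = 1;       // smis carry the value shifted left by 1

      int32_t key = 5;                            // logical element index
      int32_t tagged_key = key << kSmiTagSize;    // what a smi register holds

      // The two addressing modes above compute the same byte offset:
      int32_t from_tagged   = tagged_key << (kPointerSizeLog2 - kSmiTagSize);
      int32_t from_untagged = key << kPointerSizeLog2;
      assert(from_tagged == from_untagged);       // both are key * 4
      return 0;
    }
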
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
Register elements = ToRegister(instr->elements());
bool key_is_constant = instr->key()->IsConstantOperand();
Register key = no_reg;
@@ -3117,65 +2993,13 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
(instr->additional_index() << element_size_shift)));
}
- __ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
-}
-
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ ldr(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ tst(result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment());
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- __ cmp(result, scratch);
- DeoptimizeIf(eq, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
+ __ vldr(result, elements, 0);
}
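
The hole check above inspects a single 32-bit word: FixedDoubleArray marks missing elements with a dedicated NaN whose upper word (kHoleNanUpper32) ordinary arithmetic cannot produce, so comparing that word alone identifies the hole. A sketch with illustrative constants (the real values live in V8's headers):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      // Illustrative hole encoding: a NaN payload no ordinary arithmetic
      // produces; the values here are just for the sketch.
      const uint32_t kHoleUpper = 0x7FF7FFFFu;
      const uint32_t kHoleLower = 0xFFF7FFFFu;
      uint64_t hole_bits = (uint64_t(kHoleUpper) << 32) | kHoleLower;

      double slot;
      std::memcpy(&slot, &hole_bits, sizeof(slot));  // a "hole" element

      // The deopt check above loads only the upper 32-bit word (offset
      // sizeof(kHoleNanLower32) on little-endian ARM) and compares it.
      uint32_t words[2];
      std::memcpy(words, &slot, sizeof(words));
      assert(words[1] == kHoleUpper);  // hole detected: deoptimize
      return 0;
    }
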
@@ -3215,6 +3039,87 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ CpuFeatures::Scope scope(VFP3);
+ DwVfpRegister result = ToDoubleRegister(instr->result());
+ Operand operand = key_is_constant
+ ? Operand(constant_key << element_size_shift)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch0(), external_pointer, operand);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ vldr(result.low(), scratch0(), additional_offset);
+ __ vcvt_f64_f32(result, result.low());
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ vldr(result, scratch0(), additional_offset);
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ ldrsb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ ldrb(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ ldrsh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ ldrh(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ ldr(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ cmp(result, Operand(0x80000000));
+ DeoptimizeIf(cs, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
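
The switch above pairs each external elements kind with a load width and signedness (ldrsb sign-extends a byte, ldrb zero-extends, and so on), and an unsigned 32-bit load deoptimizes when the value cannot be represented as a signed int32. The same distinctions in portable C++:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint8_t raw = 0x90;  // one byte of an external typed-array store

      // EXTERNAL_BYTE_ELEMENTS uses the sign-extending ldrsb ...
      int32_t as_signed = static_cast<int8_t>(raw);   // -112
      // ... EXTERNAL_UNSIGNED_BYTE_ELEMENTS the zero-extending ldrb.
      int32_t as_unsigned = raw;                      // 144
      assert(as_signed == -112 && as_unsigned == 144);

      // EXTERNAL_UNSIGNED_INT_ELEMENTS values of 2^31 or more do not fit
      // a signed 32-bit result; that is the cmp against 0x80000000 with a
      // deopt on carry-set, unless the kUint32 flag permits the value.
      uint32_t big = 0x80000001u;
      assert(big >= 0x80000000u);  // would deoptimize
      return 0;
    }
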
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r1));
ASSERT(ToRegister(instr->key()).is(r0));
@@ -3820,20 +3725,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
-}
-
-
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -4109,8 +4000,102 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+    // Even though the HStoreKeyedFastElement instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ add(scratch, elements,
+ Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
+ } else {
+ __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ str(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ add(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kLRHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
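
RecordWrite here is the generational-GC write barrier: storing a heap pointer into the elements backing store can create an old-to-new reference, so the slot must be recorded for the collector, and the barrier is skipped when the value is known to be a smi. A toy analogue, assuming a simple remembered-set model rather than V8's store buffer:

    #include <cassert>
    #include <cstdint>
    #include <set>

    // Toy remembered set standing in for V8's store buffer.
    static std::set<uintptr_t> remembered_slots;

    void RecordWriteSketch(uintptr_t slot, bool value_is_heap_object) {
      if (!value_is_heap_object) return;  // smis need no barrier
      remembered_slots.insert(slot);      // EMIT_REMEMBERED_SET
    }

    int main() {
      uintptr_t elements_base = 0x1000;
      uintptr_t slot = elements_base + 8;  // store_base + offset above
      RecordWriteSketch(slot, /*value_is_heap_object=*/true);
      assert(remembered_slots.count(slot) == 1);
      return 0;
    }
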
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DwVfpRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ Operand operand = key_is_constant
+ ? Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag)
+ : Operand(key, LSL, shift_size);
+ __ add(scratch, elements, operand);
+ if (!key_is_constant) {
+ __ add(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ // Check for NaN. All NaNs must be canonicalized.
+ __ VFPCompareAndSetFlags(value, value);
+ // Only load canonical NaN if the comparison above set the overflow.
+ __ Vmov(value,
+ FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
+ no_reg, vs);
+ }
+
+ __ vstr(value, scratch, instr->additional_index() << element_size_shift);
+}
+
+
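
NeedsCanonicalization exists because a double array reserves one NaN bit pattern as the hole marker, so any other NaN written by user code must first be rewritten to the canonical NaN. VFPCompareAndSetFlags(value, value) exploits the fact that only NaN compares unordered with itself (hence the vs condition). A portable check of the same trick, with an assumed canonical bit pattern:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    int main() {
      double value = std::nan("");  // some NaN, payload unspecified

      // Only NaN compares unordered with itself; this is what
      // VFPCompareAndSetFlags(value, value) plus the vs condition tests.
      bool is_nan = !(value == value);
      assert(is_nan);

      if (is_nan) {
        // Replace it with one fixed quiet-NaN pattern (an assumed stand-in
        // for canonical_not_the_hole_nan_as_double) so no stray payload
        // can ever match the hole encoding.
        const uint64_t kCanonical = 0x7FF8000000000000ull;
        std::memcpy(&value, &kCanonical, sizeof(value));
      }
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      assert(bits == 0x7FF8000000000000ull);
      return 0;
    }
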
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -4179,110 +4164,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DwVfpRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- Operand operand = key_is_constant
- ? Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag)
- : Operand(key, LSL, shift_size);
- __ add(scratch, elements, operand);
- if (!key_is_constant) {
- __ add(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- // Check for NaN. All NaNs must be canonicalized.
- __ VFPCompareAndSetFlags(value, value);
- // Only load canonical NaN if the comparison above set the overflow.
- __ Vmov(value,
- FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
- no_reg, vs);
- }
-
- __ vstr(value, scratch, instr->additional_index() << element_size_shift);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ add(scratch, elements,
- Operand(key, LSL, kPointerSizeLog2 - kSmiTagSize));
- } else {
- __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2));
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ str(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ add(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kLRHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(r2));
ASSERT(ToRegister(instr->key()).is(r1));
@@ -4740,6 +4621,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
+ SwVfpRegister single_scratch = double_scratch.low();
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4761,7 +4643,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ EmitECMATruncate(input_reg,
double_scratch2,
- double_scratch,
+ single_scratch,
scratch1,
scratch2,
scratch3);
@@ -4843,19 +4725,20 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DwVfpRegister double_input = ToDoubleRegister(instr->value());
- DwVfpRegister double_scratch = double_scratch0();
Label done;
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
+ SwVfpRegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
- double_scratch,
+ single_scratch,
scratch1,
scratch2,
scratch3);
} else {
+ DwVfpRegister double_scratch = double_scratch0();
__ EmitVFPTruncate(kRoundToMinusInf,
result_reg,
double_input,
@@ -5029,7 +4912,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
@@ -5054,6 +4936,7 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
DoCheckMapCommon(temp1, temp2,
Handle<Map>(current_prototype->map()),
ALLOW_ELEMENT_TRANSITION_MAPS, instr->environment());
+ DeoptimizeIf(ne, instr->environment());
}
@@ -5664,7 +5547,7 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
__ cmp(sp, Operand(ip));
__ b(hs, &done);
StackCheckStub stub;
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize);
+ PredictableCodeSizeScope predictable(masm_);
CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
EnsureSpaceForLazyDeopt();
__ bind(&done);
diff --git a/deps/v8/src/arm/lithium-codegen-arm.h b/deps/v8/src/arm/lithium-codegen-arm.h
index 921285b0d..9281537c1 100644
--- a/deps/v8/src/arm/lithium-codegen-arm.h
+++ b/deps/v8/src/arm/lithium-codegen-arm.h
@@ -377,12 +377,6 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
diff --git a/deps/v8/src/arm/macro-assembler-arm.cc b/deps/v8/src/arm/macro-assembler-arm.cc
index dc1dc1da9..623bd6a01 100644
--- a/deps/v8/src/arm/macro-assembler-arm.cc
+++ b/deps/v8/src/arm/macro-assembler-arm.cc
@@ -422,17 +422,6 @@ void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
void MacroAssembler::LoadRoot(Register destination,
Heap::RootListIndex index,
Condition cond) {
- if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
- !Heap::RootCanBeWrittenAfterInitialization(index) &&
- !predictable_code_size()) {
- Handle<Object> root(isolate()->heap()->roots_array_start()[index]);
- if (!isolate()->heap()->InNewSpace(*root)) {
- // The CPU supports fast immediate values, and this root will never
- // change. We will load it as a relocatable immediate value.
- mov(destination, Operand(root), LeaveCC, cond);
- return;
- }
- }
ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), cond);
}
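
With the movw/movt fast path removed, LoadRoot is always a single load off the dedicated roots register: destination = [kRootRegister + (index << kPointerSizeLog2)]. The equivalent array indexing, sketched on the host:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Stand-in for the heap's root list; kRootRegister points at roots[0].
      uintptr_t roots[4] = {0x11, 0x22, 0x33, 0x44};
      const int kPointerSizeLog2 = sizeof(uintptr_t) == 8 ? 3 : 2;

      int index = 2;
      // ldr(destination, MemOperand(kRootRegister, index << kPointerSizeLog2))
      uintptr_t destination = *reinterpret_cast<uintptr_t*>(
          reinterpret_cast<uintptr_t>(roots) + (index << kPointerSizeLog2));
      assert(destination == roots[index]);
      return 0;
    }
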
@@ -1787,10 +1776,10 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
add(scratch1, length,
- Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
+ Operand(kObjectAlignmentMask + SeqAsciiString::kHeaderSize));
and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -1956,13 +1945,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset) {
+ Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -1989,10 +1978,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
- str(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
+ str(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -2013,8 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
add(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
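
Note that this path never touches a VFP register: the double is stored as two 32-bit words, the mantissa word at kHeaderSize and the exponent word sizeof(kHoleNanLower32) bytes later, matching the little-endian layout. A host-side illustration (assumes a little-endian machine, as ARM is here):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      double d = 1.5;
      uint32_t words[2];
      std::memcpy(words, &d, sizeof(d));  // little-endian: low word first

      // The code above writes words[0] (mantissa_reg) at kHeaderSize and
      // words[1] (exponent_reg) four bytes later, then reads them back
      // the same way.
      uint64_t bits = (uint64_t(words[1]) << 32) | words[0];
      double back;
      std::memcpy(&back, &bits, sizeof(back));
      assert(back == 1.5);
      return 0;
    }
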
@@ -2223,28 +2209,12 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
add(r6, r6, Operand(1));
str(r6, MemOperand(r7, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// Native call returns to the DirectCEntry stub which redirects to the
// return address pushed on stack (could have moved after GC).
// DirectCEntry stub itself is generated early and never moves.
DirectCEntryStub stub;
stub.GenerateCall(this, function);
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, r0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
Label promote_scheduled_exception;
Label delete_allocated_handles;
Label leave_exit_frame;
@@ -2490,20 +2460,6 @@ void MacroAssembler::ConvertToInt32(Register source,
}
-void MacroAssembler::TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done) {
- ASSERT(!double_input.is(double_scratch));
-
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
- vcvt_f64_s32(double_scratch, double_scratch.low());
- VFPCompareAndSetFlags(double_input, double_scratch);
- b(eq, done);
-}
-
-
void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Register result,
DwVfpRegister double_input,
@@ -2519,7 +2475,11 @@ void MacroAssembler::EmitVFPTruncate(VFPRoundingMode rounding_mode,
Label done;
// Test for values that can be exactly represented as a signed 32-bit integer.
- TryFastDoubleToInt32(result, double_input, double_scratch, &done);
+ vcvt_s32_f64(double_scratch.low(), double_input);
+ vmov(result, double_scratch.low());
+ vcvt_f64_s32(double_scratch, double_scratch.low());
+ VFPCompareAndSetFlags(double_input, double_scratch);
+ b(eq, &done);
// Convert to integer, respecting rounding mode.
int32_t check_inexact_conversion =
@@ -2636,7 +2596,7 @@ void MacroAssembler::EmitOutOfInt32RangeTruncate(Register result,
void MacroAssembler::EmitECMATruncate(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch,
+ SwVfpRegister single_scratch,
Register scratch,
Register input_high,
Register input_low) {
@@ -2647,18 +2607,16 @@ void MacroAssembler::EmitECMATruncate(Register result,
ASSERT(!scratch.is(result) &&
!scratch.is(input_high) &&
!scratch.is(input_low));
- ASSERT(!double_input.is(double_scratch));
+ ASSERT(!single_scratch.is(double_input.low()) &&
+ !single_scratch.is(double_input.high()));
Label done;
- // Test for values that can be exactly represented as a signed 32-bit integer.
- TryFastDoubleToInt32(result, double_input, double_scratch, &done);
-
// Clear cumulative exception flags.
ClearFPSCRBits(kVFPExceptionMask, scratch);
// Try a conversion to a signed integer.
- vcvt_s32_f64(double_scratch.low(), double_input);
- vmov(result, double_scratch.low());
+ vcvt_s32_f64(single_scratch, double_input);
+ vmov(result, single_scratch);
  // Retrieve the FPSCR.
vmrs(scratch);
// Check for overflow and NaNs.
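
EmitECMATruncate implements ECMA-262 ToInt32: attempt a direct conversion, and when the FPSCR reports overflow or an invalid operation, fall back to truncation modulo 2^32 (NaN and infinities become 0). A portable sketch of the semantics the stub must produce:

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // Truncate toward zero, wrap modulo 2^32, reinterpret as signed;
    // NaN and infinities give 0.
    int32_t EcmaToInt32(double d) {
      if (std::isnan(d) || std::isinf(d)) return 0;
      double t = std::fmod(std::trunc(d), 4294967296.0);  // 2^32
      uint32_t w = static_cast<uint32_t>(static_cast<int64_t>(t));
      // Fold [2^31, 2^32) down to [-2^31, 0) without overflow:
      return (w >= 0x80000000u)
                 ? static_cast<int32_t>(w - 0x80000000u) + INT32_MIN
                 : static_cast<int32_t>(w);
    }

    int main() {
      assert(EcmaToInt32(1.9) == 1);
      assert(EcmaToInt32(-1.9) == -1);
      assert(EcmaToInt32(4294967296.0 + 5.0) == 5);    // wraps mod 2^32
      assert(EcmaToInt32(2147483648.0) == INT32_MIN);  // 2^31 wraps negative
      return 0;
    }
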
@@ -3370,10 +3328,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch2,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch1, first, Operand(kFlatAsciiStringMask));
and_(scratch2, second, Operand(kFlatAsciiStringMask));
cmp(scratch1, Operand(kFlatAsciiStringTag));
@@ -3387,10 +3343,8 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
and_(scratch, type, Operand(kFlatAsciiStringMask));
cmp(scratch, Operand(kFlatAsciiStringTag));
b(ne, failure);
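
These helpers lean on the instance-type bit layout: AND-ing with a composite mask and comparing against a composite tag checks "is a string", "is sequential" and "is ASCII" in two instructions. A standalone illustration with assumed bit positions (not V8's actual encoding):

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kIsNotStringMask          = 1u << 7;
      const uint32_t kStringEncodingMask       = 1u << 2;  // set = ASCII here
      const uint32_t kStringRepresentationMask = 3u;       // 0 = sequential
      const uint32_t mask =
          kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
      const uint32_t kFlatAsciiTag = kStringEncodingMask;  // seq + ASCII

      uint32_t seq_ascii  = kStringEncodingMask;      // sequential ASCII
      uint32_t cons_ascii = kStringEncodingMask | 1;  // non-sequential
      // One AND and one compare classify string-ness, encoding and
      // representation at once.
      assert((seq_ascii & mask) == kFlatAsciiTag);
      assert((cons_ascii & mask) != kFlatAsciiTag);
      return 0;
    }
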
@@ -3730,7 +3684,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
ldr(ip, FieldMemOperand(value, String::kLengthOffset));
tst(instance_type, Operand(kStringEncodingMask));
diff --git a/deps/v8/src/arm/macro-assembler-arm.h b/deps/v8/src/arm/macro-assembler-arm.h
index 15cef16f0..e3e39a387 100644
--- a/deps/v8/src/arm/macro-assembler-arm.h
+++ b/deps/v8/src/arm/macro-assembler-arm.h
@@ -322,7 +322,6 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
@@ -832,14 +831,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset = 0);
+ Label* fail);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
@@ -894,15 +893,12 @@ class MacroAssembler: public Assembler {
// Load and check the instance type of an object for being a string.
// Loads the type into the second argument register.
- // Returns a condition that will be enabled if the object was a string
- // and the passed-in condition passed. If the passed-in condition failed
- // then flags remain unchanged.
+ // Returns a condition that will be enabled if the object was a string.
Condition IsObjectStringType(Register obj,
- Register type,
- Condition cond = al) {
- ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
- ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
- tst(type, Operand(kIsNotStringMask), cond);
+ Register type) {
+ ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+ tst(type, Operand(kIsNotStringMask));
ASSERT_EQ(0, kStringTag);
return eq;
}
@@ -959,14 +955,6 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
- // Try to convert a double to a signed 32-bit integer. If the double value
- // can be exactly represented as an integer, the code jumps to 'done' and
- // 'result' contains the integer value. Otherwise, the code falls through.
- void TryFastDoubleToInt32(Register result,
- DwVfpRegister double_input,
- DwVfpRegister double_scratch,
- Label* done);
-
// Truncates a double using a specific rounding mode, and writes the value
// to the result register.
// Clears the z flag (ne condition) if an overflow occurs.
@@ -997,7 +985,7 @@ class MacroAssembler: public Assembler {
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
DwVfpRegister double_input,
- DwVfpRegister double_scratch,
+ SwVfpRegister single_scratch,
Register scratch,
Register scratch2,
Register scratch3);
@@ -1214,7 +1202,7 @@ class MacroAssembler: public Assembler {
  // Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
  // Jump if the register contains a smi.
inline void JumpIfSmi(Register value, Label* smi_label) {
tst(value, Operand(kSmiTagMask));
b(eq, smi_label);
diff --git a/deps/v8/src/arm/regexp-macro-assembler-arm.cc b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
index d852d2376..17b867784 100644
--- a/deps/v8/src/arm/regexp-macro-assembler-arm.cc
+++ b/deps/v8/src/arm/regexp-macro-assembler-arm.cc
@@ -1150,7 +1150,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1181,7 +1181,7 @@ int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/arm/simulator-arm.cc b/deps/v8/src/arm/simulator-arm.cc
index d11e340a9..5b8ba2ada 100644
--- a/deps/v8/src/arm/simulator-arm.cc
+++ b/deps/v8/src/arm/simulator-arm.cc
@@ -1387,14 +1387,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
- *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
- }
+ UNIMPLEMENTED();
break;
}
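
The downgrade stubs the simulator's ROR shifts back to UNIMPLEMENTED(); the deleted code implemented an ordinary 32-bit rotate right. For reference, the same operation in portable C++:

    #include <cassert>
    #include <cstdint>

    // What the deleted simulator code computed for 1 <= n <= 31:
    // rotate right, with the carry flag taken from bit 31 of the result;
    // n == 0 leaves the value and the existing carry untouched.
    uint32_t Ror32(uint32_t x, unsigned n, bool* carry_out, bool c_flag) {
      if (n == 0) {
        *carry_out = c_flag;
        return x;
      }
      uint32_t result = (x >> n) | (x << (32 - n));
      *carry_out = (result >> 31) != 0;
      return result;
    }

    int main() {
      bool carry = false;
      assert(Ror32(0x80000001u, 1, &carry, false) == 0xC0000000u);
      assert(carry);  // bit 31 of the result
      return 0;
    }
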
@@ -1466,14 +1459,7 @@ int32_t Simulator::GetShiftRm(Instruction* instr, bool* carry_out) {
}
case ROR: {
- if (shift_amount == 0) {
- *carry_out = c_flag_;
- } else {
- uint32_t left = static_cast<uint32_t>(result) >> shift_amount;
- uint32_t right = static_cast<uint32_t>(result) << (32 - shift_amount);
- result = right | left;
- *carry_out = (static_cast<uint32_t>(result) >> 31) != 0;
- }
+ UNIMPLEMENTED();
break;
}
@@ -2778,20 +2764,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
set_d_register_from_double(vd, dd_value);
- } else if ((instr->Opc1Value() == 0x0) && !(instr->Opc3Value() & 0x1)) {
- // vmla
- if (instr->SzValue() != 0x1) {
- UNREACHABLE(); // Not used by V8.
- }
-
- double dd_value = get_double_from_d_register(vd);
- double dn_value = get_double_from_d_register(vn);
- double dm_value = get_double_from_d_register(vm);
-
- // Note: we do the mul and add in separate steps to avoid getting a result
- // with too high precision.
- set_d_register_from_double(vd, dn_value * dm_value);
- set_d_register_from_double(vd, get_double_from_d_register(vd) + dd_value);
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
if (instr->SzValue() != 0x1) {
@@ -3301,7 +3273,33 @@ void Simulator::Execute() {
}
-void Simulator::CallInternal(byte* entry) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(r0, va_arg(parameters, int32_t));
+ set_register(r1, va_arg(parameters, int32_t));
+ set_register(r2, va_arg(parameters, int32_t));
+ set_register(r3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
// Prepare to execute the code at entry
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -3355,37 +3353,6 @@ void Simulator::CallInternal(byte* entry) {
set_register(r9, r9_val);
set_register(r10, r10_val);
set_register(r11, r11_val);
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(r0, va_arg(parameters, int32_t));
- set_register(r1, va_arg(parameters, int32_t));
- set_register(r2, va_arg(parameters, int32_t));
- set_register(r3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -3396,27 +3363,6 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
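
Simulator::Call now inlines the ARM EABI argument marshalling: the first four integer arguments travel in r0 through r3, the rest go on the stack, and the stack pointer is rounded down to the activation frame alignment with the "&= -alignment" trick. A host-side sketch of the same computation:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kRegArgs = 4;   // r0-r3 carry the first four arguments
      const int alignment = 8;  // typical EABI activation frame alignment
      int argument_count = 6;
      int32_t original_sp = 0x1000;

      // Same computation as above: make room for the stack-passed
      // arguments, then round down so the frame stays aligned
      // (entry_stack &= -OS::ActivationFrameAlignment()).
      int32_t entry_sp = original_sp - (argument_count - kRegArgs) * 4;
      entry_sp &= -alignment;
      assert(entry_sp % alignment == 0);
      assert(entry_sp == 0xFF8);

      // Arguments 4..n-1 are then written from low to high memory
      // starting at entry_sp, exactly like the stack_argument loop above.
      int32_t stack_args[2] = {/*arg4=*/40, /*arg5=*/50};
      (void)stack_args;
      return 0;
    }
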
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (use_eabi_hardfloat()) {
- set_d_register_from_double(0, d0);
- set_d_register_from_double(1, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(2, buffer);
- }
- CallInternal(entry);
- if (use_eabi_hardfloat()) {
- return get_double_from_d_register(0);
- } else {
- return get_double_from_register_pair(0);
- }
-}
-
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/arm/simulator-arm.h b/deps/v8/src/arm/simulator-arm.h
index ec47fa1f1..abc91bbc4 100644
--- a/deps/v8/src/arm/simulator-arm.h
+++ b/deps/v8/src/arm/simulator-arm.h
@@ -205,8 +205,6 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -358,8 +356,6 @@ class Simulator {
template<class InputType, int register_size>
void SetVFPRegister(int reg_index, const InputType& value);
- void CallInternal(byte* entry);
-
// Architecture state.
// Saturating instructions require a Q flag to indicate saturation.
// There is currently no way to read the CPSR directly, and thus read the Q
diff --git a/deps/v8/src/arm/stub-cache-arm.cc b/deps/v8/src/arm/stub-cache-arm.cc
index a194dfae5..d3b58624c 100644
--- a/deps/v8/src/arm/stub-cache-arm.cc
+++ b/deps/v8/src/arm/stub-cache-arm.cc
@@ -327,23 +327,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index) {
- if (index.is_header_index()) {
- int offset = index.header_index() * kPointerSize;
+ int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
__ ldr(dst, FieldMemOperand(src, offset));
} else {
- // Adjust for the number of properties stored in the holder.
- int slot = index.field_index() - holder->map()->inobject_properties();
- if (slot < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (slot * kPointerSize);
- __ ldr(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = slot * kPointerSize + FixedArray::kHeaderSize;
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ ldr(dst, FieldMemOperand(dst, offset));
- }
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ ldr(dst, FieldMemOperand(dst, offset));
}
}
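
The reverted helper folds both property locations into one signed index: after subtracting the in-object property count, a negative index lands inside the object itself (below instance_size), and a non-negative one lands in the external properties array. The offset arithmetic, with made-up layout numbers:

    #include <cassert>

    int main() {
      // Illustrative map layout; the numbers are not from a real V8 map.
      const int kPointerSize = 4;
      const int kInstanceSize = 32;         // object size in bytes
      const int kInobjectProperties = 2;    // slots stored inside the object
      const int kFixedArrayHeaderSize = 8;  // header of the properties array

      // Property #1 lives in-object: adjusted index is negative.
      int index = 1 - kInobjectProperties;  // -1
      assert(index < 0);
      int in_object_offset = kInstanceSize + index * kPointerSize;
      assert(in_object_offset == 28);       // last slot of the object

      // Property #3 spills to the properties array: adjusted index >= 0.
      index = 3 - kInobjectProperties;      // 1
      int out_of_object_offset = index * kPointerSize + kFixedArrayHeaderSize;
      assert(out_of_object_offset == 12);
      return 0;
    }
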
@@ -1201,7 +1196,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1550,7 +1545,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r2 : name
@@ -1623,7 +1618,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
+ Label attempt_to_grow_elements;
Register elements = r6;
Register end_elements = r5;
@@ -1634,9 +1629,10 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
r0,
Heap::kFixedArrayMapRootIndex,
- &check_double,
+ &call_builtin,
DONT_DO_SMI_CHECK);
+
// Get the array's length into r0 and calculate new length.
__ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
STATIC_ASSERT(kSmiTagSize == 1);
@@ -1651,6 +1647,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ b(gt, &attempt_to_grow_elements);
// Check if value is a smi.
+ Label with_write_barrier;
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(r4, &with_write_barrier);
@@ -1670,40 +1667,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- r0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(r0, r0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(r0, r4);
- __ b(gt, &call_builtin);
-
- __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- r4, r0, elements, r3, r5, r2, r9,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
__ bind(&with_write_barrier);
__ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1715,11 +1678,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(r3, r7, &call_builtin);
-
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r7, ip);
- __ b(eq, &call_builtin);
// edx: receiver
// r3: map
Label try_holey_map;
@@ -2954,7 +2912,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- r0 : receiver
@@ -3143,7 +3101,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// ----------- S t a t e -------------
// -- lr : return address
// -- r0 : key
@@ -3509,13 +3467,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// r1: constructor function
// r2: initial map
// r7: undefined
- ASSERT(function->has_initial_map());
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ cmp(r3, Operand(instance_size >> kPointerSizeLog2));
- __ Check(eq, "Instance size of initial map changed.");
-#endif
__ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3573,6 +3525,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3856,20 +3809,20 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
// Now we can use r0 for the result as key is not needed any more.
__ mov(r0, r5);
- Register dst_mantissa = r1;
- Register dst_exponent = r3;
+ Register dst1 = r1;
+ Register dst2 = r3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
d0,
- dst_mantissa,
- dst_exponent,
+ dst1,
+ dst2,
r9,
s0);
- __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+ __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
+ __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@@ -4138,7 +4091,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
- d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
+ d0, r6, r7, // These are: double_dst, dst1, dst2.
r4, s2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatures::Scope scope(VFP2);
@@ -4197,7 +4150,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
- __ EmitECMATruncate(r5, d0, d1, r6, r7, r9);
+ __ EmitECMATruncate(r5, d0, s2, r6, r7, r9);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
@@ -4690,12 +4643,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- r1 : key
// -- r2 : receiver
// -- lr : return address
- // -- r3 : scratch (elements backing store)
+ // -- r3 : scratch
// -- r4 : scratch
// -- r5 : scratch
- // -- r6 : scratch
- // -- r7 : scratch
- // -- r9 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4708,7 +4658,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = r5;
Register scratch3 = r6;
Register scratch4 = r7;
- Register scratch5 = r9;
Register length_reg = r7;
// This stub is meant to be tail-jumped to, the receiver must already
@@ -4739,6 +4688,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ StoreNumberToDoubleElements(value_reg,
key_reg,
+ receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4787,7 +4737,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
- // Initialize the new FixedDoubleArray.
+  // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+  // efficiency; they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ mov(scratch1,
@@ -4795,25 +4746,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ mov(scratch1, Operand(kHoleNanLower32));
- __ mov(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ str(scratch1, FieldMemOperand(elements_reg, offset));
- __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
// Install the new backing store in the JSArray.
__ str(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4826,7 +4758,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ ldr(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
+ __ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
diff --git a/deps/v8/src/array.js b/deps/v8/src/array.js
index 47f796d2b..250c30c32 100644
--- a/deps/v8/src/array.js
+++ b/deps/v8/src/array.js
@@ -413,7 +413,6 @@ function ArrayJoin(separator) {
["Array.prototype.join"]);
}
- var length = TO_UINT32(this.length);
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@@ -423,7 +422,7 @@ function ArrayJoin(separator) {
var result = %_FastAsciiArrayJoin(this, separator);
if (!IS_UNDEFINED(result)) return result;
- return Join(this, length, separator, ConvertToString);
+ return Join(this, TO_UINT32(this.length), separator, ConvertToString);
}
@@ -442,8 +441,8 @@ function ArrayPop() {
}
n--;
var value = this[n];
- delete this[n];
this.length = n;
+ delete this[n];
return value;
}
@@ -582,7 +581,7 @@ function ArrayShift() {
var first = this[0];
- if (IS_ARRAY(this) && !%IsObserved(this)) {
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 1, len, 0);
} else {
SimpleMove(this, 0, 1, len, 0);
@@ -603,7 +602,7 @@ function ArrayUnshift(arg1) { // length == 1
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
- if (IS_ARRAY(this) && !%IsObserved(this)) {
+ if (IS_ARRAY(this)) {
SmartMove(this, 0, 0, len, num_arguments);
} else {
SimpleMove(this, 0, 0, len, num_arguments);
@@ -650,7 +649,6 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
@@ -707,9 +705,7 @@ function ArraySplice(start, delete_count) {
var use_simple_splice = true;
- if (IS_ARRAY(this) &&
- !%IsObserved(this) &&
- num_additional_args !== del_count) {
+ if (IS_ARRAY(this) && num_additional_args !== del_count) {
// If we are only deleting/moving a few things near the end of the
// array then the simple version is going to be faster, because it
// doesn't touch most of the array.
@@ -1553,11 +1549,9 @@ function SetUpArray() {
// exposed to user code.
// Adding only the functions that are actually used.
SetUpLockedPrototype(InternalArray, $Array(), $Array(
- "indexOf", getFunction("indexOf", ArrayIndexOf),
"join", getFunction("join", ArrayJoin),
"pop", getFunction("pop", ArrayPop),
- "push", getFunction("push", ArrayPush),
- "splice", getFunction("splice", ArraySplice)
+ "push", getFunction("push", ArrayPush)
));
}
diff --git a/deps/v8/src/assembler.cc b/deps/v8/src/assembler.cc
index 25157be2e..d81d4ae61 100644
--- a/deps/v8/src/assembler.cc
+++ b/deps/v8/src/assembler.cc
@@ -103,78 +103,15 @@ static DoubleConstant double_constants;
const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
-static bool math_exp_data_initialized = false;
-static Mutex* math_exp_data_mutex = NULL;
-static double* math_exp_constants_array = NULL;
-static double* math_exp_log_table_array = NULL;
-
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
-AssemblerBase::AssemblerBase(Isolate* isolate, void* buffer, int buffer_size)
+AssemblerBase::AssemblerBase(Isolate* isolate)
: isolate_(isolate),
- jit_cookie_(0),
- emit_debug_code_(FLAG_debug_code),
- predictable_code_size_(false) {
+ jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
-
- if (buffer == NULL) {
- // Do our own buffer management.
- if (buffer_size <= kMinimalBufferSize) {
- buffer_size = kMinimalBufferSize;
- if (isolate->assembler_spare_buffer() != NULL) {
- buffer = isolate->assembler_spare_buffer();
- isolate->set_assembler_spare_buffer(NULL);
- }
- }
- if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
- own_buffer_ = true;
- } else {
- // Use externally provided buffer instead.
- ASSERT(buffer_size > 0);
- own_buffer_ = false;
- }
- buffer_ = static_cast<byte*>(buffer);
- buffer_size_ = buffer_size;
-
- pc_ = buffer_;
-}
-
-
-AssemblerBase::~AssemblerBase() {
- if (own_buffer_) {
- if (isolate() != NULL &&
- isolate()->assembler_spare_buffer() == NULL &&
- buffer_size_ == kMinimalBufferSize) {
- isolate()->set_assembler_spare_buffer(buffer_);
- } else {
- DeleteArray(buffer_);
- }
- }
-}
-
-
-// -----------------------------------------------------------------------------
-// Implementation of PredictableCodeSizeScope
-
-PredictableCodeSizeScope::PredictableCodeSizeScope(AssemblerBase* assembler,
- int expected_size)
- : assembler_(assembler),
- expected_size_(expected_size),
- start_offset_(assembler->pc_offset()),
- old_value_(assembler->predictable_code_size()) {
- assembler_->set_predictable_code_size(true);
-}
-
-
-PredictableCodeSizeScope::~PredictableCodeSizeScope() {
- // TODO(svenpanne) Remove the 'if' when everything works.
- if (expected_size_ >= 0) {
- CHECK_EQ(expected_size_, assembler_->pc_offset() - start_offset_);
- }
- assembler_->set_predictable_code_size(old_value_);
}
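
The deleted constructor and destructor implemented a small buffer-recycling scheme: minimal-size buffers are parked in a per-isolate spare slot instead of being freed, and externally provided buffers are never owned by the assembler. Below is a hedged sketch of that ownership policy, with SpareBufferPool as a hypothetical stand-in for the isolate's spare-buffer accessors.

    #include <cstddef>
    #include <cstdlib>

    constexpr std::size_t kMinimalBufferSize = 4 * 1024;

    struct SpareBufferPool {
      void* spare = nullptr;
      void* Take() { void* b = spare; spare = nullptr; return b; }
      // Only buffers of exactly the minimal size are kept for reuse.
      bool Give(void* b, std::size_t size) {
        if (spare == nullptr && size == kMinimalBufferSize) { spare = b; return true; }
        return false;
      }
    };

    struct Buffer {
      void* data;
      std::size_t size;
      bool owned;  // true when we allocated it and must free or recycle it
    };

    Buffer Acquire(SpareBufferPool* pool, void* external, std::size_t size) {
      if (external != nullptr) return {external, size, false};  // caller-provided
      if (size <= kMinimalBufferSize) {
        size = kMinimalBufferSize;
        if (void* b = pool->Take()) return {b, size, true};     // reuse the spare
      }
      return {std::malloc(size), size, true};
    }

    void Release(SpareBufferPool* pool, Buffer buf) {
      if (!buf.owned) return;                       // external buffers untouched
      if (!pool->Give(buf.data, buf.size)) std::free(buf.data);
    }

    int main() {
      SpareBufferPool pool;
      Buffer a = Acquire(&pool, nullptr, 100);  // rounded up, freshly allocated
      Release(&pool, a);                        // recycled into the spare slot
      Buffer b = Acquire(&pool, nullptr, 100);  // reuses the spare buffer
      Release(&pool, b);
    }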
@@ -376,7 +313,6 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
- ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
<= kMaxStandardNonCompactModes);
@@ -634,15 +570,6 @@ void RelocIterator::next() {
}
}
}
- if (code_age_sequence_ != NULL) {
- byte* old_code_age_sequence = code_age_sequence_;
- code_age_sequence_ = NULL;
- if (SetMode(RelocInfo::CODE_AGE_SEQUENCE)) {
- rinfo_.data_ = 0;
- rinfo_.pc_ = old_code_age_sequence;
- return;
- }
- }
done_ = true;
}
@@ -658,12 +585,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
- byte* sequence = code->FindCodeAgeSequence();
- if (sequence != NULL && !Code::IsYoungSequence(sequence)) {
- code_age_sequence_ = sequence;
- } else {
- code_age_sequence_ = NULL;
- }
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -679,7 +600,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
- code_age_sequence_ = NULL;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@@ -732,8 +652,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
UNREACHABLE();
#endif
return "debug break slot";
- case RelocInfo::CODE_AGE_SEQUENCE:
- return "code_age_sequence";
case RelocInfo::NUMBER_OF_MODES:
UNREACHABLE();
return "number_of_modes";
@@ -821,9 +739,6 @@ void RelocInfo::Verify() {
case NUMBER_OF_MODES:
UNREACHABLE();
break;
- case CODE_AGE_SEQUENCE:
- ASSERT(Code::IsYoungSequence(pc_) || code_age_stub()->IsCode());
- break;
}
}
#endif // VERIFY_HEAP
@@ -841,70 +756,6 @@ void ExternalReference::SetUp() {
double_constants.canonical_non_hole_nan = OS::nan_value();
double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
double_constants.negative_infinity = -V8_INFINITY;
-
- math_exp_data_mutex = OS::CreateMutex();
-}
-
-
-void ExternalReference::InitializeMathExpData() {
- // Early return?
- if (math_exp_data_initialized) return;
-
- math_exp_data_mutex->Lock();
- if (!math_exp_data_initialized) {
- // If this is changed, generated code must be adapted too.
- const int kTableSizeBits = 11;
- const int kTableSize = 1 << kTableSizeBits;
- const double kTableSizeDouble = static_cast<double>(kTableSize);
-
- math_exp_constants_array = new double[9];
- // Input values smaller than this always return 0.
- math_exp_constants_array[0] = -708.39641853226408;
- // Input values larger than this always return +Infinity.
- math_exp_constants_array[1] = 709.78271289338397;
- math_exp_constants_array[2] = V8_INFINITY;
- // The rest is black magic. Do not attempt to understand it. It is
- // loosely based on the "expd" function published at:
- // http://herumi.blogspot.com/2011/08/fast-double-precision-exponential.html
- const double constant3 = (1 << kTableSizeBits) / log(2.0);
- math_exp_constants_array[3] = constant3;
- math_exp_constants_array[4] =
- static_cast<double>(static_cast<int64_t>(3) << 51);
- math_exp_constants_array[5] = 1 / constant3;
- math_exp_constants_array[6] = 3.0000000027955394;
- math_exp_constants_array[7] = 0.16666666685227835;
- math_exp_constants_array[8] = 1;
-
- math_exp_log_table_array = new double[kTableSize];
- for (int i = 0; i < kTableSize; i++) {
- double value = pow(2, i / kTableSizeDouble);
-
- uint64_t bits = BitCast<uint64_t, double>(value);
- bits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa = BitCast<double, uint64_t>(bits);
-
- // <just testing>
- uint64_t doublebits;
- memcpy(&doublebits, &value, sizeof doublebits);
- doublebits &= (static_cast<uint64_t>(1) << 52) - 1;
- double mantissa2;
- memcpy(&mantissa2, &doublebits, sizeof mantissa2);
- CHECK_EQ(mantissa, mantissa2);
- // </just testing>
-
- math_exp_log_table_array[i] = mantissa;
- }
-
- math_exp_data_initialized = true;
- }
- math_exp_data_mutex->Unlock();
-}
-
-
-void ExternalReference::TearDownMathExpData() {
- delete[] math_exp_constants_array;
- delete[] math_exp_log_table_array;
- delete math_exp_data_mutex;
}
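
For reference, the deleted InitializeMathExpData precomputes a 2048-entry table holding only the mantissa bits of 2^(i/2048); generated code then combines this table with the constants above to approximate exp(x). The following standalone reconstruction of the table setup uses only values taken verbatim from the removed code.

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <vector>

    int main() {
      const int kTableSizeBits = 11;
      const int kTableSize = 1 << kTableSizeBits;

      std::vector<double> table(kTableSize);
      for (int i = 0; i < kTableSize; i++) {
        double value = std::pow(2.0, i / static_cast<double>(kTableSize));
        // Keep only the 52 mantissa bits of the IEEE-754 representation.
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        bits &= (static_cast<uint64_t>(1) << 52) - 1;
        std::memcpy(&table[i], &bits, sizeof table[i]);
      }
      // Generated code combined this table with constants such as
      // (1 << kTableSizeBits) / log(2.0) to approximate exp(x).
      return 0;
    }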
@@ -1023,13 +874,6 @@ ExternalReference ExternalReference::get_date_field_function(
}
-ExternalReference ExternalReference::get_make_code_young_function(
- Isolate* isolate) {
- return ExternalReference(Redirect(
- isolate, FUNCTION_ADDR(Code::MakeCodeAgeSequenceYoung)));
-}
-
-
ExternalReference ExternalReference::date_cache_stamp(Isolate* isolate) {
return ExternalReference(isolate->date_cache()->stamp_address());
}
@@ -1056,20 +900,6 @@ ExternalReference ExternalReference::compute_output_frames_function(
}
-ExternalReference ExternalReference::log_enter_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::EnterExternal)));
-}
-
-
-ExternalReference ExternalReference::log_leave_external_function(
- Isolate* isolate) {
- return ExternalReference(
- Redirect(isolate, FUNCTION_ADDR(Logger::LeaveExternal)));
-}
-
-
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
@@ -1356,19 +1186,6 @@ ExternalReference ExternalReference::math_log_double_function(
}
-ExternalReference ExternalReference::math_exp_constants(int constant_index) {
- ASSERT(math_exp_data_initialized);
- return ExternalReference(
- reinterpret_cast<void*>(math_exp_constants_array + constant_index));
-}
-
-
-ExternalReference ExternalReference::math_exp_log_table() {
- ASSERT(math_exp_data_initialized);
- return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
-}
-
-
ExternalReference ExternalReference::page_flags(Page* page) {
return ExternalReference(reinterpret_cast<Address>(page) +
MemoryChunk::kFlagsOffset);
diff --git a/deps/v8/src/assembler.h b/deps/v8/src/assembler.h
index 4639374c2..a0e55cc81 100644
--- a/deps/v8/src/assembler.h
+++ b/deps/v8/src/assembler.h
@@ -56,56 +56,18 @@ struct StatsCounter;
class AssemblerBase: public Malloced {
public:
- AssemblerBase(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~AssemblerBase();
+ explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
- int jit_cookie() const { return jit_cookie_; }
-
- bool emit_debug_code() const { return emit_debug_code_; }
- void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
- bool predictable_code_size() const { return predictable_code_size_; }
- void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
+ int jit_cookie() { return jit_cookie_; }
// Overwrite a host NaN with a quiet target NaN. Used by mksnapshot for
// cross-snapshotting.
static void QuietNaN(HeapObject* nan) { }
- int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
-
- static const int kMinimalBufferSize = 4*KB;
-
- protected:
- // The buffer into which code and relocation info are generated. It could
- // either be owned by the assembler or be provided externally.
- byte* buffer_;
- int buffer_size_;
- bool own_buffer_;
-
- // The program counter, which points into the buffer above and moves forward.
- byte* pc_;
-
private:
Isolate* isolate_;
int jit_cookie_;
- bool emit_debug_code_;
- bool predictable_code_size_;
-};
-
-
-// Avoids using instructions that vary in size in unpredictable ways between the
-// snapshot and the running VM.
-class PredictableCodeSizeScope {
- public:
- PredictableCodeSizeScope(AssemblerBase* assembler, int expected_size);
- ~PredictableCodeSizeScope();
-
- private:
- AssemblerBase* assembler_;
- int expected_size_;
- int start_offset_;
- bool old_value_;
};
@@ -249,12 +211,6 @@ class RelocInfo BASE_EMBEDDED {
// Pseudo-types
NUMBER_OF_MODES, // There are at most 15 modes with noncompact encoding.
NONE, // never recorded
-    CODE_AGE_SEQUENCE,  // Not stored in RelocInfo array, used explicitly by
- // code aging.
- FIRST_REAL_RELOC_MODE = CODE_TARGET,
- LAST_REAL_RELOC_MODE = CONST_POOL,
- FIRST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
- LAST_PSEUDO_RELOC_MODE = CODE_AGE_SEQUENCE,
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
@@ -269,15 +225,6 @@ class RelocInfo BASE_EMBEDDED {
: pc_(pc), rmode_(rmode), data_(data), host_(host) {
}
- static inline bool IsRealRelocMode(Mode mode) {
- return mode >= FIRST_REAL_RELOC_MODE &&
- mode <= LAST_REAL_RELOC_MODE;
- }
- static inline bool IsPseudoRelocMode(Mode mode) {
- ASSERT(!IsRealRelocMode(mode));
- return mode >= FIRST_PSEUDO_RELOC_MODE &&
- mode <= LAST_PSEUDO_RELOC_MODE;
- }
static inline bool IsConstructCall(Mode mode) {
return mode == CONSTRUCT_CALL;
}
@@ -315,9 +262,6 @@ class RelocInfo BASE_EMBEDDED {
static inline bool IsDebugBreakSlot(Mode mode) {
return mode == DEBUG_BREAK_SLOT;
}
- static inline bool IsCodeAgeSequence(Mode mode) {
- return mode == CODE_AGE_SEQUENCE;
- }
static inline int ModeMask(Mode mode) { return 1 << mode; }
// Accessors
@@ -350,8 +294,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(Handle<JSGlobalPropertyCell> target_cell_handle());
INLINE(void set_target_cell(JSGlobalPropertyCell* cell,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER));
- INLINE(Code* code_age_stub());
- INLINE(void set_code_age_stub(Code* stub));
+
// Read the address of the word containing the target_address in an
// instruction stream. What this means exactly is architecture-independent.
@@ -544,7 +487,6 @@ class RelocIterator: public Malloced {
byte* pos_;
byte* end_;
- byte* code_age_sequence_;
RelocInfo rinfo_;
bool done_;
int mode_mask_;
@@ -604,8 +546,6 @@ class ExternalReference BASE_EMBEDDED {
};
static void SetUp();
- static void InitializeMathExpData();
- static void TearDownMathExpData();
typedef void* ExternalReferenceRedirector(void* original, Type type);
@@ -655,16 +595,10 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference get_date_field_function(Isolate* isolate);
static ExternalReference date_cache_stamp(Isolate* isolate);
- static ExternalReference get_make_code_young_function(Isolate* isolate);
-
// Deoptimization support.
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
- // Log support.
- static ExternalReference log_enter_external_function(Isolate* isolate);
- static ExternalReference log_leave_external_function(Isolate* isolate);
-
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
@@ -731,9 +665,6 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference math_tan_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
- static ExternalReference math_exp_constants(int constant_index);
- static ExternalReference math_exp_log_table();
-
static ExternalReference page_flags(Page* page);
Address address() const {return reinterpret_cast<Address>(address_);}
diff --git a/deps/v8/src/ast.cc b/deps/v8/src/ast.cc
index 232cb739a..52990b8fe 100644
--- a/deps/v8/src/ast.cc
+++ b/deps/v8/src/ast.cc
@@ -103,7 +103,6 @@ VariableProxy::VariableProxy(Isolate* isolate,
void VariableProxy::BindTo(Variable* var) {
ASSERT(var_ == NULL); // must be bound only once
ASSERT(var != NULL); // must bind
- ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
// Ideally CONST-ness should match. However, this is very hard to achieve
// because we don't know the exact semantics of conflicting (const and
@@ -477,7 +476,6 @@ void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle,
void CaseClause::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
TypeInfo info = oracle->SwitchType(this);
- if (info.IsUninitialized()) info = TypeInfo::Unknown();
if (info.IsSmi()) {
compare_type_ = SMI_ONLY;
} else if (info.IsSymbol()) {
@@ -606,6 +604,18 @@ void CallNew::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
}
+void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
+ TypeInfo info = oracle->CompareType(this);
+ if (info.IsSmi()) {
+ compare_type_ = SMI_ONLY;
+ } else if (info.IsNonPrimitive()) {
+ compare_type_ = OBJECT_ONLY;
+ } else {
+ ASSERT(compare_type_ == NONE);
+ }
+}
+
+
void ObjectLiteral::Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
receiver_type_ = oracle->ObjectLiteralStoreIsMonomorphic(this)
? oracle->GetObjectLiteralStoreMap(this)
@@ -1060,14 +1070,16 @@ REGULAR_NODE(CallNew)
// LOOKUP variables only result from constructs that cannot be inlined anyway.
REGULAR_NODE(VariableProxy)
-// We currently do not optimize any modules.
+// We currently do not optimize any modules. Note in particular that module
+// instance objects associated with ModuleLiterals are allocated during
+// scope resolution, and references to them are embedded into the code.
+// That code may hence neither be cached nor re-compiled.
DONT_OPTIMIZE_NODE(ModuleDeclaration)
DONT_OPTIMIZE_NODE(ImportDeclaration)
DONT_OPTIMIZE_NODE(ExportDeclaration)
DONT_OPTIMIZE_NODE(ModuleVariable)
DONT_OPTIMIZE_NODE(ModulePath)
DONT_OPTIMIZE_NODE(ModuleUrl)
-DONT_OPTIMIZE_NODE(ModuleStatement)
DONT_OPTIMIZE_NODE(WithStatement)
DONT_OPTIMIZE_NODE(TryCatchStatement)
DONT_OPTIMIZE_NODE(TryFinallyStatement)
diff --git a/deps/v8/src/ast.h b/deps/v8/src/ast.h
index d299f19a2..802ac6596 100644
--- a/deps/v8/src/ast.h
+++ b/deps/v8/src/ast.h
@@ -75,7 +75,6 @@ namespace internal {
#define STATEMENT_NODE_LIST(V) \
V(Block) \
- V(ModuleStatement) \
V(ExpressionStatement) \
V(EmptyStatement) \
V(IfStatement) \
@@ -523,7 +522,7 @@ class ModuleDeclaration: public Declaration {
ModuleDeclaration(VariableProxy* proxy,
Module* module,
Scope* scope)
- : Declaration(proxy, MODULE, scope),
+ : Declaration(proxy, LET, scope),
module_(module) {
}
@@ -646,25 +645,6 @@ class ModuleUrl: public Module {
};
-class ModuleStatement: public Statement {
- public:
- DECLARE_NODE_TYPE(ModuleStatement)
-
- VariableProxy* proxy() const { return proxy_; }
- Block* body() const { return body_; }
-
- protected:
- ModuleStatement(VariableProxy* proxy, Block* body)
- : proxy_(proxy),
- body_(body) {
- }
-
- private:
- VariableProxy* proxy_;
- Block* body_;
-};
-
-
class IterationStatement: public BreakableStatement {
public:
// Type testing & conversion.
@@ -1437,7 +1417,7 @@ class VariableProxy: public Expression {
void MarkAsTrivial() { is_trivial_ = true; }
void MarkAsLValue() { is_lvalue_ = true; }
- // Bind this proxy to the variable var. Interfaces must match.
+ // Bind this proxy to the variable var.
void BindTo(Variable* var);
protected:
@@ -1797,6 +1777,9 @@ class CompareOperation: public Expression {
// Type feedback information.
TypeFeedbackId CompareOperationFeedbackId() const { return reuse(id()); }
+ void RecordTypeFeedback(TypeFeedbackOracle* oracle);
+ bool IsSmiCompare() { return compare_type_ == SMI_ONLY; }
+ bool IsObjectCompare() { return compare_type_ == OBJECT_ONLY; }
// Match special cases.
bool IsLiteralCompareTypeof(Expression** expr, Handle<String>* check);
@@ -1813,7 +1796,8 @@ class CompareOperation: public Expression {
op_(op),
left_(left),
right_(right),
- pos_(pos) {
+ pos_(pos),
+ compare_type_(NONE) {
ASSERT(Token::IsCompareOp(op));
}
@@ -1822,6 +1806,9 @@ class CompareOperation: public Expression {
Expression* left_;
Expression* right_;
int pos_;
+
+ enum CompareTypeFeedback { NONE, SMI_ONLY, OBJECT_ONLY };
+ CompareTypeFeedback compare_type_;
};
@@ -2660,11 +2647,6 @@ class AstNodeFactory BASE_EMBEDDED {
STATEMENT_WITH_LABELS(SwitchStatement)
#undef STATEMENT_WITH_LABELS
- ModuleStatement* NewModuleStatement(VariableProxy* proxy, Block* body) {
- ModuleStatement* stmt = new(zone_) ModuleStatement(proxy, body);
- VISIT_AND_RETURN(ModuleStatement, stmt)
- }
-
ExpressionStatement* NewExpressionStatement(Expression* expression) {
ExpressionStatement* stmt = new(zone_) ExpressionStatement(expression);
VISIT_AND_RETURN(ExpressionStatement, stmt)
diff --git a/deps/v8/src/atomicops.h b/deps/v8/src/atomicops.h
index da33b2968..1f0c44a67 100644
--- a/deps/v8/src/atomicops.h
+++ b/deps/v8/src/atomicops.h
@@ -151,9 +151,7 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
} } // namespace v8::internal
// Include our platform specific implementation.
-#if defined(THREAD_SANITIZER)
-#include "atomicops_internals_tsan.h"
-#elif defined(_MSC_VER) && \
+#if defined(_MSC_VER) && \
(defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
diff --git a/deps/v8/src/atomicops_internals_tsan.h b/deps/v8/src/atomicops_internals_tsan.h
deleted file mode 100644
index 6559336ad..000000000
--- a/deps/v8/src/atomicops_internals_tsan.h
+++ /dev/null
@@ -1,335 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-// This file is an internal atomic implementation for compiler-based
-// ThreadSanitizer. Use base/atomicops.h instead.
-
-#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
-#define V8_ATOMICOPS_INTERNALS_TSAN_H_
-
-// This struct is not part of the public API of this module; clients may not
-// use it. (However, it's exported via BASE_EXPORT because clients implicitly
-// do use it at link time by inlining these functions.)
-// Features of this x86. Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
- bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence
- // after acquire compare-and-swap.
- bool has_sse2; // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct
- AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
-
-namespace v8 {
-namespace internal {
-
-#ifndef TSAN_INTERFACE_ATOMIC_H
-#define TSAN_INTERFACE_ATOMIC_H
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef char __tsan_atomic8;
-typedef short __tsan_atomic16; // NOLINT
-typedef int __tsan_atomic32;
-typedef long __tsan_atomic64; // NOLINT
-
-typedef enum {
- __tsan_memory_order_relaxed = (1 << 0) + 100500,
- __tsan_memory_order_consume = (1 << 1) + 100500,
- __tsan_memory_order_acquire = (1 << 2) + 100500,
- __tsan_memory_order_release = (1 << 3) + 100500,
- __tsan_memory_order_acq_rel = (1 << 4) + 100500,
- __tsan_memory_order_seq_cst = (1 << 5) + 100500,
-} __tsan_memory_order;
-
-__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
- __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
- __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
- __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
- __tsan_memory_order mo);
-
-void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
- __tsan_memory_order mo);
-void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
- __tsan_memory_order mo);
-void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
- __tsan_memory_order mo);
-void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
- __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
- __tsan_atomic8 v, __tsan_memory_order mo);
-__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
- __tsan_atomic16 v, __tsan_memory_order mo);
-__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
- __tsan_atomic32 v, __tsan_memory_order mo);
-__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
- __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
- __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
-int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
- __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
-int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
- __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
-int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
- __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);
-
-void __tsan_atomic_thread_fence(__tsan_memory_order mo);
-
-#ifdef __cplusplus
-} // extern "C"
-#endif
-
-#endif // #ifndef TSAN_INTERFACE_ATOMIC_H
-
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
- Atomic32 new_value) {
- return __tsan_atomic32_exchange(ptr, new_value,
- __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
- Atomic32 increment) {
- return increment + __tsan_atomic32_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
- Atomic32 old_value,
- Atomic32 new_value) {
- Atomic32 cmp = old_value;
- __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
- return cmp;
-}
-
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
- __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
- return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_relaxed);
- return cmp;
-}
-
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
- Atomic64 new_value) {
- return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
- Atomic64 increment) {
- return increment + __tsan_atomic64_fetch_add(ptr, increment,
- __tsan_memory_order_acq_rel);
-}
-
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
-}
-
-inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
- __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
-}
-
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
- return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
-}
-
-inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
- return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
-}
-
-inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_acquire);
- return cmp;
-}
-
-inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
- Atomic64 old_value,
- Atomic64 new_value) {
- Atomic64 cmp = old_value;
- __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
- __tsan_memory_order_release);
- return cmp;
-}
-
-inline void MemoryBarrier() {
- __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
-}
-
-} // namespace internal
-} // namespace v8
-
-#undef ATOMICOPS_COMPILER_BARRIER
-
-#endif // V8_ATOMICOPS_INTERNALS_TSAN_H_
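
The deleted header routed every atomic primitive through the __tsan_* interface so ThreadSanitizer can observe the intended memory ordering. The same contracts can be expressed with standard C++11 atomics; the sketch below mirrors a few representative operations and is illustrative only, not a drop-in replacement for the removed file.

    #include <atomic>
    #include <cstdint>

    using Atomic32 = int32_t;

    inline Atomic32 NoBarrier_CompareAndSwap(std::atomic<Atomic32>* p,
                                             Atomic32 old_value,
                                             Atomic32 new_value) {
      Atomic32 cmp = old_value;
      p->compare_exchange_strong(cmp, new_value, std::memory_order_relaxed);
      return cmp;  // previous value, matching the V8 contract
    }

    inline void Release_Store(std::atomic<Atomic32>* p, Atomic32 v) {
      p->store(v, std::memory_order_release);
    }

    inline Atomic32 Acquire_Load(const std::atomic<Atomic32>* p) {
      return p->load(std::memory_order_acquire);
    }

    int main() {
      std::atomic<Atomic32> x{0};
      Release_Store(&x, 42);
      Atomic32 seen = Acquire_Load(&x);
      NoBarrier_CompareAndSwap(&x, seen, seen + 1);
    }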
diff --git a/deps/v8/src/bootstrapper.cc b/deps/v8/src/bootstrapper.cc
index 8d529506f..a368eefe7 100644
--- a/deps/v8/src/bootstrapper.cc
+++ b/deps/v8/src/bootstrapper.cc
@@ -1084,11 +1084,11 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->callee_symbol(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsCalleeIndex);
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsCalleeIndex);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1186,7 +1186,7 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
LookupResult lookup(isolate);
result->LocalLookup(heap->length_symbol(), &lookup);
ASSERT(lookup.IsField());
- ASSERT(lookup.GetFieldIndex().field_index() == Heap::kArgumentsLengthIndex);
+ ASSERT(lookup.GetFieldIndex() == Heap::kArgumentsLengthIndex);
ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
@@ -1240,9 +1240,8 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
// Initialize the out of memory slot.
native_context()->set_out_of_memory(heap->false_value());
- // Initialize the embedder data slot.
- Handle<FixedArray> embedder_data = factory->NewFixedArray(2);
- native_context()->set_embedder_data(*embedder_data);
+ // Initialize the data slot.
+ native_context()->set_data(heap->undefined_value());
{
// Initialize the random seed slot.
@@ -1341,7 +1340,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (cache == NULL || !cache->Lookup(name, &function_info)) {
- ASSERT(source->IsOneByteRepresentation());
+ ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = factory->NewStringFromUtf8(name);
function_info = Compiler::Compile(
source,
@@ -1416,11 +1415,6 @@ void Genesis::InstallExperimentalNativeFunctions() {
INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
}
- if (FLAG_harmony_observation) {
- INSTALL_NATIVE(JSFunction, "NotifyChange", observers_notify_change);
- INSTALL_NATIVE(JSFunction, "DeliverChangeRecords",
- observers_deliver_changes);
- }
}
#undef INSTALL_NATIVE
@@ -1834,11 +1828,6 @@ bool Genesis::InstallExperimentalNatives() {
"native collection.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
- if (FLAG_harmony_observation &&
- strcmp(ExperimentalNatives::GetScriptName(i).start(),
- "native object-observe.js") == 0) {
- if (!CompileExperimentalBuiltin(isolate(), i)) return false;
- }
}
InstallExperimentalNativeFunctions();
diff --git a/deps/v8/src/bootstrapper.h b/deps/v8/src/bootstrapper.h
index d61c0313f..179e65c35 100644
--- a/deps/v8/src/bootstrapper.h
+++ b/deps/v8/src/bootstrapper.h
@@ -54,7 +54,7 @@ class SourceCodeCache BASE_EMBEDDED {
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
- SeqOneByteString* str = SeqOneByteString::cast(cache_->get(i));
+ SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
diff --git a/deps/v8/src/builtins.cc b/deps/v8/src/builtins.cc
index d62713db4..df70cd4fc 100644
--- a/deps/v8/src/builtins.cc
+++ b/deps/v8/src/builtins.cc
@@ -268,7 +268,7 @@ static MaybeObject* ArrayCodeGenericCommon(Arguments* args,
maybe_elms = heap->AllocateFixedArrayWithHoles(number_of_elements);
}
FixedArrayBase* elms;
- if (!maybe_elms->To(&elms)) return maybe_elms;
+ if (!maybe_elms->To<FixedArrayBase>(&elms)) return maybe_elms;
// Fill in the content
switch (array->GetElementsKind()) {
@@ -325,18 +325,6 @@ BUILTIN(ArrayCodeGeneric) {
}
-static void MoveDoubleElements(FixedDoubleArray* dst,
- int dst_index,
- FixedDoubleArray* src,
- int src_index,
- int len) {
- if (len == 0) return;
- memmove(dst->data_start() + dst_index,
- src->data_start() + src_index,
- len * kDoubleSize);
-}
-
-
static void MoveElements(Heap* heap,
AssertNoAllocation* no_gc,
FixedArray* dst,
@@ -363,39 +351,24 @@ static void FillWithHoles(Heap* heap, FixedArray* dst, int from, int to) {
}
-static void FillWithHoles(FixedDoubleArray* dst, int from, int to) {
- for (int i = from; i < to; i++) {
- dst->set_the_hole(i);
- }
-}
-
-
-static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
- FixedArrayBase* elms,
- int to_trim) {
- Map* map = elms->map();
- int entry_size;
- if (elms->IsFixedArray()) {
- entry_size = kPointerSize;
- } else {
- entry_size = kDoubleSize;
- }
+static FixedArray* LeftTrimFixedArray(Heap* heap,
+ FixedArray* elms,
+ int to_trim) {
ASSERT(elms->map() != HEAP->fixed_cow_array_map());
// For now this trick is only applied to fixed arrays in new and paged space.
// In large object space the object's start must coincide with chunk
// and thus the trick is just not applicable.
ASSERT(!HEAP->lo_space()->Contains(elms));
- STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
- STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
- STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+ STATIC_ASSERT(FixedArray::kMapOffset == 0);
+ STATIC_ASSERT(FixedArray::kLengthOffset == kPointerSize);
+ STATIC_ASSERT(FixedArray::kHeaderSize == 2 * kPointerSize);
Object** former_start = HeapObject::RawField(elms, 0);
const int len = elms->length();
- if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
- elms->IsFixedArray() &&
+ if (to_trim > FixedArray::kHeaderSize / kPointerSize &&
!heap->new_space()->Contains(elms)) {
// If we are doing a big trim in old space then we zap the space that was
// formerly part of the array so that the GC (aided by the card-based
@@ -409,15 +382,14 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
- heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
+ heap->CreateFillerObjectAt(elms->address(), to_trim * kPointerSize);
- int new_start_index = to_trim * (entry_size / kPointerSize);
- former_start[new_start_index] = map;
- former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
+ former_start[to_trim] = heap->fixed_array_map();
+ former_start[to_trim + 1] = Smi::FromInt(len - to_trim);
// Maintain marking consistency for HeapObjectIterator and
// IncrementalMarking.
- int size_delta = to_trim * entry_size;
+ int size_delta = to_trim * kPointerSize;
if (heap->marking()->TransferMark(elms->address(),
elms->address() + size_delta)) {
MemoryChunk::IncrementLiveBytesFromMutator(elms->address(), -size_delta);
@@ -425,8 +397,8 @@ static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
HEAP_PROFILE(heap, ObjectMoveEvent(elms->address(),
elms->address() + size_delta));
- return FixedArrayBase::cast(HeapObject::FromAddress(
- elms->address() + to_trim * entry_size));
+ return FixedArray::cast(HeapObject::FromAddress(
+ elms->address() + to_trim * kPointerSize));
}
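
The reverted LeftTrimFixedArray trims elements off the front of an array without copying: it writes a fresh map and shortened length immediately before the surviving elements and turns the vacated prefix into a filler object for the GC. A toy model of that header relocation on a plain word array follows; kMapToken is a hypothetical stand-in for the map pointer, and real heap bookkeeping (fillers, marking, live-byte accounting) is elided.

    #include <cassert>
    #include <cstdint>

    constexpr intptr_t kMapToken = -1;  // stands in for the fixed-array map

    // Returns the word index of the array's new header after trimming
    // `to_trim` elements off the front. Slot 0 is the "map", slot 1 the
    // length, and the remaining slots are elements.
    int LeftTrim(intptr_t* words, int to_trim) {
      intptr_t length = words[1];
      assert(to_trim >= 0 && to_trim < length);
      // Write a new header immediately before the surviving elements...
      words[to_trim] = kMapToken;
      words[to_trim + 1] = length - to_trim;
      // ...the vacated prefix words would become a filler object for the GC.
      return to_trim;
    }

    int main() {
      intptr_t heap[8] = {kMapToken, 6, 10, 11, 12, 13, 14, 15};
      int start = LeftTrim(heap, 2);
      assert(heap[start] == kMapToken);
      assert(heap[start + 1] == 4);   // new length
      assert(heap[start + 2] == 12);  // first surviving element
    }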
@@ -455,14 +427,19 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
Map* map = elms->map();
if (map == heap->fixed_array_map()) {
if (args == NULL || array->HasFastObjectElements()) return elms;
+ if (array->HasFastDoubleElements()) {
+ ASSERT(elms == heap->empty_fixed_array());
+ MaybeObject* maybe_transition =
+ array->TransitionElementsKind(FAST_ELEMENTS);
+ if (maybe_transition->IsFailure()) return maybe_transition;
+ return elms;
+ }
} else if (map == heap->fixed_cow_array_map()) {
MaybeObject* maybe_writable_result = array->EnsureWritableFastElements();
if (args == NULL || array->HasFastObjectElements() ||
- !maybe_writable_result->To(&elms)) {
+ maybe_writable_result->IsFailure()) {
return maybe_writable_result;
}
- } else if (map == heap->fixed_double_array_map()) {
- if (args == NULL) return elms;
} else {
return NULL;
}
@@ -472,28 +449,13 @@ static inline MaybeObject* EnsureJSArrayWithWritableFastElements(
int args_length = args->length();
if (first_added_arg >= args_length) return array->elements();
- ElementsKind origin_kind = array->map()->elements_kind();
- ASSERT(!IsFastObjectElementsKind(origin_kind));
- ElementsKind target_kind = origin_kind;
- int arg_count = args->length() - first_added_arg;
- Object** arguments = args->arguments() - first_added_arg - (arg_count - 1);
- for (int i = 0; i < arg_count; i++) {
- Object* arg = arguments[i];
- if (arg->IsHeapObject()) {
- if (arg->IsHeapNumber()) {
- target_kind = FAST_DOUBLE_ELEMENTS;
- } else {
- target_kind = FAST_ELEMENTS;
- break;
- }
- }
- }
- if (target_kind != origin_kind) {
- MaybeObject* maybe_failure = array->TransitionElementsKind(target_kind);
- if (maybe_failure->IsFailure()) return maybe_failure;
- return array->elements();
- }
- return elms;
+ MaybeObject* maybe_array = array->EnsureCanContainElements(
+ args,
+ first_added_arg,
+ args_length - first_added_arg,
+ DONT_ALLOW_DOUBLE_ELEMENTS);
+ if (maybe_array->IsFailure()) return maybe_array;
+ return array->elements();
}
@@ -537,200 +499,127 @@ MUST_USE_RESULT static MaybeObject* CallJsBuiltin(
BUILTIN(ArrayPush) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
- if (maybe_elms_obj == NULL) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
- }
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (FLAG_harmony_observation &&
- JSObject::cast(receiver)->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayPush", args);
+ Object* elms_obj;
+ { MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 1);
+ if (maybe_elms_obj == NULL) {
+ return CallJsBuiltin(isolate, "ArrayPush", args);
+ }
+ if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
-
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
- ElementsKind kind = array->GetElementsKind();
- if (IsFastSmiOrObjectElementsKind(kind)) {
- FixedArray* elms = FixedArray::cast(elms_obj);
-
- int len = Smi::cast(array->length())->value();
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
- }
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
-
- int new_length = len + to_add;
-
- if (new_length > elms->length()) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, new_elms, kind, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- elms = new_elms;
- }
-
- // Add the provided values.
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int index = 0; index < to_add; index++) {
- elms->set(index + len, args[index + 1], mode);
- }
-
- if (elms != array->elements()) {
- array->set_elements(elms);
- }
+ int len = Smi::cast(array->length())->value();
+ int to_add = args.length() - 1;
+ if (to_add == 0) {
+ return Smi::FromInt(len);
+ }
+ // Currently fixed arrays cannot grow too big, so
+ // we should never hit this case.
+ ASSERT(to_add <= (Smi::kMaxValue - len));
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
- } else {
- int len = Smi::cast(array->length())->value();
- int elms_len = elms_obj->length();
+ int new_length = len + to_add;
- int to_add = args.length() - 1;
- if (to_add == 0) {
- return Smi::FromInt(len);
+ if (new_length > elms->length()) {
+ // New backing storage is needed.
+ int capacity = new_length + (new_length >> 1) + 16;
+ Object* obj;
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
}
- // Currently fixed arrays cannot grow too big, so
- // we should never hit this case.
- ASSERT(to_add <= (Smi::kMaxValue - len));
+ FixedArray* new_elms = FixedArray::cast(obj);
- int new_length = len + to_add;
-
- FixedDoubleArray* new_elms;
-
- if (new_length > elms_len) {
- // New backing storage is needed.
- int capacity = new_length + (new_length >> 1) + 16;
- MaybeObject* maybe_obj =
- heap->AllocateUninitializedFixedDoubleArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, new_elms, kind, 0,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms_obj);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
- } else {
- // to_add is > 0 and new_length <= elms_len, so elms_obj cannot be the
- // empty_fixed_array.
- new_elms = FixedDoubleArray::cast(elms_obj);
- }
+ ElementsKind kind = array->GetElementsKind();
+ CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, 0, len);
+ FillWithHoles(heap, new_elms, new_length, capacity);
- // Add the provided values.
- AssertNoAllocation no_gc;
- int index;
- for (index = 0; index < to_add; index++) {
- Object* arg = args[index + 1];
- new_elms->set(index + len, arg->Number());
- }
+ elms = new_elms;
+ }
- if (new_elms != array->elements()) {
- array->set_elements(new_elms);
- }
+ // Add the provided values.
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int index = 0; index < to_add; index++) {
+ elms->set(index + len, args[index + 1], mode);
+ }
- // Set the length.
- array->set_length(Smi::FromInt(new_length));
- return Smi::FromInt(new_length);
+ if (elms != array->elements()) {
+ array->set_elements(elms);
}
+
+ // Set the length.
+ array->set_length(Smi::FromInt(new_length));
+ return Smi::FromInt(new_length);
}
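
Both the removed and the restored fast paths above size the replacement
backing store with the same policy. A minimal standalone sketch of that
arithmetic (the helper name is an assumption; V8 computes this inline):

    // Growth policy used when a push outgrows the current FixedArray:
    // roughly 1.5x geometric growth plus a constant pad of 16 slots,
    // so small arrays do not reallocate on every push.
    static int NewBackingStoreCapacity(int new_length) {
      return new_length + (new_length >> 1) + 16;
    }
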
BUILTIN(ArrayPop) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
- JSArray* array = JSArray::cast(receiver);
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayPop", args);
+ Object* elms_obj;
+ { MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL) return CallJsBuiltin(isolate, "ArrayPop", args);
+ if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
+ JSArray* array = JSArray::cast(receiver);
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- int new_length = len - 1;
- MaybeObject* maybe_result;
- if (accessor->HasElement(array, array, new_length, elms_obj)) {
- maybe_result = accessor->Get(array, array, new_length, elms_obj);
- } else {
- maybe_result = array->GetPrototype()->GetElement(len - 1);
+  // Get the top element.
+ MaybeObject* top = elms->get(len - 1);
+
+ // Set the length.
+ array->set_length(Smi::FromInt(len - 1));
+
+ if (!top->IsTheHole()) {
+ // Delete the top element.
+ elms->set_the_hole(len - 1);
+ return top;
}
- if (maybe_result->IsFailure()) return maybe_result;
- MaybeObject* maybe_failure =
- accessor->SetLength(array, Smi::FromInt(new_length));
- if (maybe_failure->IsFailure()) return maybe_failure;
- return maybe_result;
+
+ top = array->GetPrototype()->GetElement(len - 1);
+
+ return top;
}
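
The restored ArrayPop reads the top slot straight out of the FixedArray,
shrinks the length first, and only consults the prototype chain when the
slot holds the hole. A toy model of that control flow (illustrative names,
not V8 API):

    #include <optional>
    #include <vector>

    // Simplified pop fast path: the length is updated before the hole
    // check, and a hole defers to a slower prototype lookup, modeled
    // here as a callback.
    template <typename ProtoGet>
    std::optional<int> FastPop(std::vector<std::optional<int>>& elms,
                               int& length, ProtoGet prototype_get) {
      if (length == 0) return std::nullopt;   // undefined
      std::optional<int> top = elms[length - 1];
      --length;                               // set the length first
      if (top.has_value()) {
        elms[length].reset();                 // delete the top element
        return top;
      }
      return prototype_get(length);           // hole: ask the prototype
    }
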
BUILTIN(ArrayShift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayShift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
+ Object* elms_obj;
+ { MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayShift", args);
+ if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
+ }
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayShift", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayShift", args);
- }
+ ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
if (len == 0) return heap->undefined_value();
// Get first element
- ElementsAccessor* accessor = array->GetElementsAccessor();
- Object* first;
- MaybeObject* maybe_first = accessor->Get(receiver, array, 0, elms_obj);
- if (!maybe_first->To(&first)) return maybe_first;
+ Object* first = elms->get(0);
if (first->IsTheHole()) {
first = heap->undefined_value();
}
- if (!heap->lo_space()->Contains(elms_obj)) {
- array->set_elements(LeftTrimFixedArray(heap, elms_obj, 1));
+ if (!heap->lo_space()->Contains(elms)) {
+ array->set_elements(LeftTrimFixedArray(heap, elms, 1));
} else {
// Shift the elements.
- if (elms_obj->IsFixedArray()) {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
- elms->set(len - 1, heap->the_hole_value());
- } else {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, 0, elms, 1, len - 1);
- elms->set_the_hole(len - 1);
- }
+ AssertNoAllocation no_gc;
+ MoveElements(heap, &no_gc, elms, 0, elms, 1, len - 1);
+ elms->set(len - 1, heap->the_hole_value());
}
// Set the length.
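
When the backing store is outside large-object space, the shift above
avoids an O(n) element move by left-trimming the FixedArray in place;
V8 actually rewrites the array's header words in the heap. A toy model
of why trimming is cheap:

    #include <cassert>

    // Dropping the first `delta` slots by sliding the logical start
    // forward is O(1); arrays in large-object space cannot relocate
    // their start, so they take the O(n) move path instead.
    struct ToyArray {
      int* data;
      int length;
    };

    static ToyArray LeftTrim(ToyArray a, int delta) {
      assert(delta >= 0 && delta <= a.length);
      return ToyArray{a.data + delta, a.length - delta};  // no copies
    }
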
@@ -743,25 +632,19 @@ BUILTIN(ArrayShift) {
BUILTIN(ArrayUnshift) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms_obj =
- EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
- if (maybe_elms_obj == NULL)
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- if (!maybe_elms_obj->To(&elms_obj)) return maybe_elms_obj;
-
- if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ Object* elms_obj;
+ { MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, NULL, 0);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArrayUnshift", args);
+ if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- JSArray* array = JSArray::cast(receiver);
- if (!array->HasFastSmiOrObjectElements()) {
+ if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArrayUnshift", args);
}
FixedArray* elms = FixedArray::cast(elms_obj);
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArrayUnshift", args);
- }
+ JSArray* array = JSArray::cast(receiver);
+ ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
int to_add = args.length() - 1;
@@ -778,18 +661,14 @@ BUILTIN(ArrayUnshift) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_elms = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_elms->To(&new_elms)) return maybe_elms;
-
+ Object* obj;
+ { MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ FixedArray* new_elms = FixedArray::cast(obj);
ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, new_elms, kind, to_add,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
+ CopyObjectToObjectElements(elms, kind, 0, new_elms, kind, to_add, len);
+ FillWithHoles(heap, new_elms, new_length, capacity);
elms = new_elms;
array->set_elements(elms);
} else {
@@ -813,20 +692,16 @@ BUILTIN(ArrayUnshift) {
BUILTIN(ArraySlice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms;
+ FixedArray* elms;
int len = -1;
if (receiver->IsJSArray()) {
JSArray* array = JSArray::cast(receiver);
- if (!IsJSArrayFastElementMovingAllowed(heap, array)) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
-
- if (array->HasFastElements()) {
- elms = array->elements();
- } else {
+ if (!array->HasFastSmiOrObjectElements() ||
+ !IsJSArrayFastElementMovingAllowed(heap, array)) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
+ elms = FixedArray::cast(array->elements());
len = Smi::cast(array->length())->value();
} else {
// Array.slice(arguments, ...) is quite a common idiom (notably more
@@ -835,19 +710,15 @@ BUILTIN(ArraySlice) {
isolate->context()->native_context()->arguments_boilerplate()->map();
bool is_arguments_object_with_fast_elements =
- receiver->IsJSObject() &&
- JSObject::cast(receiver)->map() == arguments_map;
+ receiver->IsJSObject()
+ && JSObject::cast(receiver)->map() == arguments_map
+ && JSObject::cast(receiver)->HasFastSmiOrObjectElements();
if (!is_arguments_object_with_fast_elements) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- JSObject* object = JSObject::cast(receiver);
-
- if (object->HasFastElements()) {
- elms = object->elements();
- } else {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- Object* len_obj = object->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
+ elms = FixedArray::cast(JSObject::cast(receiver)->elements());
+ Object* len_obj = JSObject::cast(receiver)
+ ->InObjectPropertyAt(Heap::kArgumentsLengthIndex);
if (!len_obj->IsSmi()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -855,27 +726,12 @@ BUILTIN(ArraySlice) {
if (len > elms->length()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
- }
-
- JSObject* object = JSObject::cast(receiver);
- ElementsKind kind = object->GetElementsKind();
-
- if (IsHoleyElementsKind(kind)) {
- bool packed = true;
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
for (int i = 0; i < len; i++) {
- if (!accessor->HasElement(object, object, i, elms)) {
- packed = false;
- break;
+ if (elms->get(i) == heap->the_hole_value()) {
+ return CallJsBuiltin(isolate, "ArraySlice", args);
}
}
- if (packed) {
- kind = GetPackedElementsKind(kind);
- } else if (!receiver->IsJSArray()) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
}
-
ASSERT(len >= 0);
int n_arguments = args.length() - 1;
@@ -888,12 +744,6 @@ BUILTIN(ArraySlice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -901,12 +751,6 @@ BUILTIN(ArraySlice) {
Object* arg2 = args[2];
if (arg2->IsSmi()) {
relative_end = Smi::cast(arg2)->value();
- } else if (arg2->IsHeapNumber()) {
- double end = HeapNumber::cast(arg2)->value();
- if (end < kMinInt || end > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySlice", args);
- }
- relative_end = static_cast<int>(end);
} else if (!arg2->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySlice", args);
}
@@ -921,24 +765,21 @@ BUILTIN(ArraySlice) {
int final = (relative_end < 0) ? Max(len + relative_end, 0)
: Min(relative_end, len);
+ ElementsKind elements_kind = JSObject::cast(receiver)->GetElementsKind();
+
// Calculate the length of result array.
int result_len = Max(final - k, 0);
+ MaybeObject* maybe_array =
+ heap->AllocateJSArrayAndStorage(elements_kind,
+ result_len,
+ result_len);
JSArray* result_array;
- MaybeObject* maybe_array = heap->AllocateJSArrayAndStorage(kind,
- result_len,
- result_len);
-
- AssertNoAllocation no_gc;
- if (result_len == 0) return maybe_array;
if (!maybe_array->To(&result_array)) return maybe_array;
- ElementsAccessor* accessor = object->GetElementsAccessor();
- MaybeObject* maybe_failure =
- accessor->CopyElements(NULL, k, result_array->elements(),
- kind, 0, result_len, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ CopyObjectToObjectElements(elms, elements_kind, k,
+ FixedArray::cast(result_array->elements()),
+ elements_kind, 0, result_len);
return result_array;
}
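
The k/final computation above follows the ECMA-262 slice rules: negative
indices count back from the end, and both bounds clamp into [0, len]. The
same clamping as a standalone sketch (helper name assumed):

    #include <algorithm>

    // Clamp relative slice bounds the way the builtin above does.
    static void ComputeSliceRange(int relative_start, int relative_end,
                                  int len, int* k, int* final_index) {
      *k = (relative_start < 0) ? std::max(len + relative_start, 0)
                                : std::min(relative_start, len);
      *final_index = (relative_end < 0) ? std::max(len + relative_end, 0)
                                        : std::min(relative_end, len);
    }
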
@@ -947,22 +788,19 @@ BUILTIN(ArraySlice) {
BUILTIN(ArraySplice) {
Heap* heap = isolate->heap();
Object* receiver = *args.receiver();
- FixedArrayBase* elms_obj;
- MaybeObject* maybe_elms =
- EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
- if (maybe_elms == NULL) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
+ Object* elms_obj;
+ { MaybeObject* maybe_elms_obj =
+ EnsureJSArrayWithWritableFastElements(heap, receiver, &args, 3);
+ if (maybe_elms_obj == NULL)
+ return CallJsBuiltin(isolate, "ArraySplice", args);
+ if (!maybe_elms_obj->ToObject(&elms_obj)) return maybe_elms_obj;
}
- if (!maybe_elms->To(&elms_obj)) return maybe_elms;
-
if (!IsJSArrayFastElementMovingAllowed(heap, JSArray::cast(receiver))) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
+ FixedArray* elms = FixedArray::cast(elms_obj);
JSArray* array = JSArray::cast(receiver);
-
- if (FLAG_harmony_observation && array->map()->is_observed()) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
+ ASSERT(array->HasFastSmiOrObjectElements());
int len = Smi::cast(array->length())->value();
@@ -973,12 +811,6 @@ BUILTIN(ArraySplice) {
Object* arg1 = args[1];
if (arg1->IsSmi()) {
relative_start = Smi::cast(arg1)->value();
- } else if (arg1->IsHeapNumber()) {
- double start = HeapNumber::cast(arg1)->value();
- if (start < kMinInt || start > kMaxInt) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
- relative_start = static_cast<int>(start);
} else if (!arg1->IsUndefined()) {
return CallJsBuiltin(isolate, "ArraySplice", args);
}
@@ -1008,84 +840,51 @@ BUILTIN(ArraySplice) {
actual_delete_count = Min(Max(value, 0), len - actual_start);
}
- ElementsKind elements_kind = array->GetElementsKind();
-
- int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
- int new_length = len - actual_delete_count + item_count;
-
- // For double mode we do not support changing the length.
- if (new_length > len && IsFastDoubleElementsKind(elements_kind)) {
- return CallJsBuiltin(isolate, "ArraySplice", args);
- }
-
- if (new_length == 0) {
- MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
- elms_obj, elements_kind, actual_delete_count);
- if (maybe_array->IsFailure()) return maybe_array;
- array->set_elements(heap->empty_fixed_array());
- array->set_length(Smi::FromInt(0));
- return maybe_array;
- }
-
JSArray* result_array = NULL;
+ ElementsKind elements_kind =
+ JSObject::cast(receiver)->GetElementsKind();
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
actual_delete_count,
actual_delete_count);
if (!maybe_array->To(&result_array)) return maybe_array;
- if (actual_delete_count > 0) {
- AssertNoAllocation no_gc;
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure =
- accessor->CopyElements(NULL, actual_start, result_array->elements(),
- elements_kind, 0, actual_delete_count, elms_obj);
- // Cannot fail since the origin and target array are of the same elements
- // kind.
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ {
+ // Fill newly created array.
+ CopyObjectToObjectElements(elms, elements_kind, actual_start,
+ FixedArray::cast(result_array->elements()),
+ elements_kind, 0, actual_delete_count);
}
+ int item_count = (n_arguments > 1) ? (n_arguments - 2) : 0;
+ int new_length = len - actual_delete_count + item_count;
+
bool elms_changed = false;
if (item_count < actual_delete_count) {
// Shrink the array.
- const bool trim_array = !heap->lo_space()->Contains(elms_obj) &&
+ const bool trim_array = !heap->lo_space()->Contains(elms) &&
((actual_start + item_count) <
(len - actual_delete_count - actual_start));
if (trim_array) {
const int delta = actual_delete_count - item_count;
- if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, delta, elms, 0, actual_start);
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
+ {
AssertNoAllocation no_gc;
MoveElements(heap, &no_gc, elms, delta, elms, 0, actual_start);
}
- elms_obj = LeftTrimFixedArray(heap, elms_obj, delta);
+ elms = LeftTrimFixedArray(heap, elms, delta);
elms_changed = true;
} else {
- if (elms_obj->IsFixedDoubleArray()) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- MoveDoubleElements(elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(elms, new_length, len);
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- MoveElements(heap, &no_gc,
- elms, actual_start + item_count,
- elms, actual_start + actual_delete_count,
- (len - actual_delete_count - actual_start));
- FillWithHoles(heap, elms, new_length, len);
- }
+ AssertNoAllocation no_gc;
+ MoveElements(heap, &no_gc,
+ elms, actual_start + item_count,
+ elms, actual_start + actual_delete_count,
+ (len - actual_delete_count - actual_start));
+ FillWithHoles(heap, elms, new_length, len);
}
} else if (item_count > actual_delete_count) {
- FixedArray* elms = FixedArray::cast(elms_obj);
// Currently fixed arrays cannot grow too big, so
// we should never hit this case.
ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
@@ -1094,29 +893,28 @@ BUILTIN(ArraySplice) {
if (new_length > elms->length()) {
// New backing storage is needed.
int capacity = new_length + (new_length >> 1) + 16;
- FixedArray* new_elms;
- MaybeObject* maybe_obj = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe_obj->To(&new_elms)) return maybe_obj;
-
- AssertNoAllocation no_gc;
+ Object* obj;
+ { MaybeObject* maybe_obj =
+ heap->AllocateUninitializedFixedArray(capacity);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
+ FixedArray* new_elms = FixedArray::cast(obj);
- ElementsKind kind = array->GetElementsKind();
- ElementsAccessor* accessor = array->GetElementsAccessor();
- if (actual_start > 0) {
+ {
// Copy the part before actual_start as is.
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, 0, new_elms, kind, 0, actual_start, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
+ ElementsKind kind = array->GetElementsKind();
+ CopyObjectToObjectElements(elms, kind, 0,
+ new_elms, kind, 0, actual_start);
+ const int to_copy = len - actual_delete_count - actual_start;
+ CopyObjectToObjectElements(elms, kind,
+ actual_start + actual_delete_count,
+ new_elms, kind,
+ actual_start + item_count, to_copy);
}
- MaybeObject* maybe_failure = accessor->CopyElements(
- NULL, actual_start + actual_delete_count, new_elms, kind,
- actual_start + item_count,
- ElementsAccessor::kCopyToEndAndInitializeToHole, elms);
- ASSERT(!maybe_failure->IsFailure());
- USE(maybe_failure);
-
- elms_obj = new_elms;
+
+ FillWithHoles(heap, new_elms, new_length, capacity);
+
+ elms = new_elms;
elms_changed = true;
} else {
AssertNoAllocation no_gc;
@@ -1127,28 +925,16 @@ BUILTIN(ArraySplice) {
}
}
- if (IsFastDoubleElementsKind(elements_kind)) {
- FixedDoubleArray* elms = FixedDoubleArray::cast(elms_obj);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- Object* arg = args[3 + k - actual_start];
- if (arg->IsSmi()) {
- elms->set(k, Smi::cast(arg)->value());
- } else {
- elms->set(k, HeapNumber::cast(arg)->value());
- }
- }
- } else {
- FixedArray* elms = FixedArray::cast(elms_obj);
- AssertNoAllocation no_gc;
- WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
- for (int k = actual_start; k < actual_start + item_count; k++) {
- elms->set(k, args[3 + k - actual_start], mode);
- }
+ AssertNoAllocation no_gc;
+ WriteBarrierMode mode = elms->GetWriteBarrierMode(no_gc);
+ for (int k = actual_start; k < actual_start + item_count; k++) {
+ elms->set(k, args[3 + k - actual_start], mode);
}
if (elms_changed) {
- array->set_elements(elms_obj);
+ array->set_elements(elms);
}
+
// Set the length.
array->set_length(Smi::FromInt(new_length));
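
The trim_array predicate above picks the cheaper of two ways to close the
gap left by a net deletion: slide the region before the gap right and
left-trim the store, or slide the tail left. A sketch of that comparison
(the helper name and the bool parameter are assumptions):

    // Prefer left-trimming when the region before the insertion point
    // (moved by the trim path) is smaller than the tail (moved by the
    // memmove path), provided the store is not in large-object space.
    static bool ShouldLeftTrim(int actual_start, int item_count,
                               int actual_delete_count, int len,
                               bool in_large_object_space) {
      int head = actual_start + item_count;
      int tail = len - actual_delete_count - actual_start;
      return !in_large_object_space && head < tail;
    }
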
@@ -1170,15 +956,14 @@ BUILTIN(ArrayConcat) {
int n_arguments = args.length();
int result_len = 0;
ElementsKind elements_kind = GetInitialFastElementsKind();
- bool has_double = false;
- bool is_holey = false;
for (int i = 0; i < n_arguments; i++) {
Object* arg = args[i];
if (!arg->IsJSArray() ||
- !JSArray::cast(arg)->HasFastElements() ||
+ !JSArray::cast(arg)->HasFastSmiOrObjectElements() ||
JSArray::cast(arg)->GetPrototype() != array_proto) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
+
int len = Smi::cast(JSArray::cast(arg)->length())->value();
// We shouldn't overflow when adding another len.
@@ -1188,51 +973,47 @@ BUILTIN(ArrayConcat) {
result_len += len;
ASSERT(result_len >= 0);
- if (result_len > FixedDoubleArray::kMaxLength) {
+ if (result_len > FixedArray::kMaxLength) {
return CallJsBuiltin(isolate, "ArrayConcat", args);
}
- ElementsKind arg_kind = JSArray::cast(arg)->map()->elements_kind();
- has_double = has_double || IsFastDoubleElementsKind(arg_kind);
- is_holey = is_holey || IsFastHoleyElementsKind(arg_kind);
- if (IsMoreGeneralElementsKindTransition(elements_kind, arg_kind)) {
- elements_kind = arg_kind;
+ if (!JSArray::cast(arg)->HasFastSmiElements()) {
+ if (IsFastSmiElementsKind(elements_kind)) {
+ if (IsFastHoleyElementsKind(elements_kind)) {
+ elements_kind = FAST_HOLEY_ELEMENTS;
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
+ }
+
+ if (JSArray::cast(arg)->HasFastHoleyElements()) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
}
}
- if (is_holey) elements_kind = GetHoleyElementsKind(elements_kind);
-
- // If a double array is concatted into a fast elements array, the fast
- // elements array needs to be initialized to contain proper holes, since
- // boxing doubles may cause incremental marking.
- ArrayStorageAllocationMode mode =
- has_double && IsFastObjectElementsKind(elements_kind)
- ? INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE : DONT_INITIALIZE_ARRAY_ELEMENTS;
- JSArray* result_array;
// Allocate result.
+ JSArray* result_array;
MaybeObject* maybe_array =
heap->AllocateJSArrayAndStorage(elements_kind,
result_len,
- result_len,
- mode);
+ result_len);
if (!maybe_array->To(&result_array)) return maybe_array;
if (result_len == 0) return result_array;
- int j = 0;
- FixedArrayBase* storage = result_array->elements();
+ // Copy data.
+ int start_pos = 0;
+ FixedArray* result_elms(FixedArray::cast(result_array->elements()));
for (int i = 0; i < n_arguments; i++) {
JSArray* array = JSArray::cast(args[i]);
int len = Smi::cast(array->length())->value();
- if (len > 0) {
- ElementsAccessor* accessor = array->GetElementsAccessor();
- MaybeObject* maybe_failure =
- accessor->CopyElements(array, 0, storage, elements_kind, j, len);
- if (maybe_failure->IsFailure()) return maybe_failure;
- j += len;
- }
+ FixedArray* elms = FixedArray::cast(array->elements());
+ CopyObjectToObjectElements(elms, elements_kind, 0,
+ result_elms, elements_kind,
+ start_pos, len);
+ start_pos += len;
}
-
- ASSERT(j == result_len);
+ ASSERT(start_pos == result_len);
return result_array;
}
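
The widening loop above tracks the result's elements kind along two
independent axes: smi-only versus general object elements, and packed
versus holey. A toy model of one step of that walk (the enum and helper
are illustrative, not V8's ElementsKind lattice):

    enum class ToyKind { SmiPacked, SmiHoley, ObjectPacked, ObjectHoley };

    // Widen the accumulated kind by one concat argument: any non-smi
    // input forces object elements, any holey input forces holeyness.
    static ToyKind Widen(ToyKind acc, bool arg_all_smis, bool arg_is_holey) {
      bool holey = arg_is_holey || acc == ToyKind::SmiHoley ||
                   acc == ToyKind::ObjectHoley;
      bool smis = arg_all_smis &&
                  (acc == ToyKind::SmiPacked || acc == ToyKind::SmiHoley);
      if (smis) return holey ? ToyKind::SmiHoley : ToyKind::SmiPacked;
      return holey ? ToyKind::ObjectHoley : ToyKind::ObjectPacked;
    }
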
@@ -1252,28 +1033,12 @@ BUILTIN(StrictModePoisonPill) {
//
-// Searches the hidden prototype chain of the given object for the first
-// object that is an instance of the given type. If no such object can
-// be found then Heap::null_value() is returned.
-static inline Object* FindHidden(Heap* heap,
- Object* object,
- FunctionTemplateInfo* type) {
- if (object->IsInstanceOf(type)) return object;
- Object* proto = object->GetPrototype();
- if (proto->IsJSObject() &&
- JSObject::cast(proto)->map()->is_hidden_prototype()) {
- return FindHidden(heap, proto, type);
- }
- return heap->null_value();
-}
-
-
// Returns the holder JSObject if the function can legally be called
// with this receiver. Returns Heap::null_value() if the call is
// illegal. Any arguments that don't fit the expected type are
-// overwritten with undefined. Note that holder and the arguments are
-// implicitly rewritten with the first object in the hidden prototype
-// chain that actually has the expected type.
+// overwritten with undefined. Arguments that do fit the expected
+// type are overwritten with the object in the prototype chain that
+// actually has that type.
static inline Object* TypeCheck(Heap* heap,
int argc,
Object** argv,
@@ -1286,10 +1051,15 @@ static inline Object* TypeCheck(Heap* heap,
SignatureInfo* sig = SignatureInfo::cast(sig_obj);
// If necessary, check the receiver
Object* recv_type = sig->receiver();
+
Object* holder = recv;
if (!recv_type->IsUndefined()) {
- holder = FindHidden(heap, holder, FunctionTemplateInfo::cast(recv_type));
- if (holder == heap->null_value()) return heap->null_value();
+ for (; holder != heap->null_value(); holder = holder->GetPrototype()) {
+ if (holder->IsInstanceOf(FunctionTemplateInfo::cast(recv_type))) {
+ break;
+ }
+ }
+ if (holder == heap->null_value()) return holder;
}
Object* args_obj = sig->args();
// If there is no argument signature we're done
@@ -1302,9 +1072,13 @@ static inline Object* TypeCheck(Heap* heap,
if (argtype->IsUndefined()) continue;
Object** arg = &argv[-1 - i];
Object* current = *arg;
- current = FindHidden(heap, current, FunctionTemplateInfo::cast(argtype));
- if (current == heap->null_value()) current = heap->undefined_value();
- *arg = current;
+ for (; current != heap->null_value(); current = current->GetPrototype()) {
+ if (current->IsInstanceOf(FunctionTemplateInfo::cast(argtype))) {
+ *arg = current;
+ break;
+ }
+ }
+ if (current == heap->null_value()) *arg = heap->undefined_value();
}
return holder;
}
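
The restored receiver and argument checks both reduce to the same walk:
follow GetPrototype() until an object satisfies the FunctionTemplateInfo,
or the chain ends in null. Factored out as a sketch (the helper name is
an assumption; the diff inlines both loops):

    // Walk the prototype chain looking for an instance of `type`;
    // returns null_value when nothing on the chain matches.
    static inline Object* FindCompatibleReceiver(Heap* heap, Object* object,
                                                 FunctionTemplateInfo* type) {
      for (; object != heap->null_value(); object = object->GetPrototype()) {
        if (object->IsInstanceOf(type)) return object;
      }
      return heap->null_value();
    }
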
diff --git a/deps/v8/src/builtins.h b/deps/v8/src/builtins.h
index a2f752e05..ca70ae540 100644
--- a/deps/v8/src/builtins.h
+++ b/deps/v8/src/builtins.h
@@ -38,25 +38,6 @@ enum BuiltinExtraArguments {
};
-#define CODE_AGE_LIST_WITH_ARG(V, A) \
- V(Quadragenarian, A) \
- V(Quinquagenarian, A) \
- V(Sexagenarian, A) \
- V(Septuagenarian, A) \
- V(Octogenarian, A)
-
-#define CODE_AGE_LIST_IGNORE_ARG(X, V) V(X)
-
-#define CODE_AGE_LIST(V) \
- CODE_AGE_LIST_WITH_ARG(CODE_AGE_LIST_IGNORE_ARG, V)
-
-#define DECLARE_CODE_AGE_BUILTIN(C, V) \
- V(Make##C##CodeYoungAgainOddMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState) \
- V(Make##C##CodeYoungAgainEvenMarking, BUILTIN, \
- UNINITIALIZED, Code::kNoExtraICState)
-
-
// Define list of builtins implemented in C++.
#define BUILTIN_LIST_C(V) \
V(Illegal, NO_EXTRA_ARGUMENTS) \
@@ -214,8 +195,8 @@ enum BuiltinExtraArguments {
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
- Code::kNoExtraICState) \
- CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
+ Code::kNoExtraICState)
+
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
@@ -398,14 +379,6 @@ class Builtins {
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
-#define DECLARE_CODE_AGE_BUILTIN_GENERATOR(C) \
- static void Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm); \
- static void Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm);
- CODE_AGE_LIST(DECLARE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DECLARE_CODE_AGE_BUILTIN_GENERATOR
-
static void InitBuiltinFunctionTable();
bool initialized_;
diff --git a/deps/v8/src/code-stubs.cc b/deps/v8/src/code-stubs.cc
index 276c87ebd..7a720592d 100644
--- a/deps/v8/src/code-stubs.cc
+++ b/deps/v8/src/code-stubs.cc
@@ -37,11 +37,11 @@
namespace v8 {
namespace internal {
-bool CodeStub::FindCodeInCache(Code** code_out, Isolate* isolate) {
- UnseededNumberDictionary* stubs = isolate->heap()->code_stubs();
- int index = stubs->FindEntry(GetKey());
+bool CodeStub::FindCodeInCache(Code** code_out) {
+ Heap* heap = Isolate::Current()->heap();
+ int index = heap->code_stubs()->FindEntry(GetKey());
if (index != UnseededNumberDictionary::kNotFound) {
- *code_out = Code::cast(stubs->ValueAt(index));
+ *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
return false;
@@ -93,8 +93,8 @@ Handle<Code> CodeStub::GetCode() {
Heap* heap = isolate->heap();
Code* code;
if (UseSpecialCache()
- ? FindCodeInSpecialCache(&code, isolate)
- : FindCodeInCache(&code, isolate)) {
+ ? FindCodeInSpecialCache(&code)
+ : FindCodeInCache(&code)) {
ASSERT(IsPregenerated() == code->is_pregenerated());
return Handle<Code>(code);
}
@@ -169,122 +169,6 @@ void CodeStub::PrintName(StringStream* stream) {
}
-void BinaryOpStub::Generate(MacroAssembler* masm) {
- // Explicitly allow generation of nested stubs. It is safe here because
- // generation code does not use any raw pointers.
- AllowStubCallsScope allow_stub_calls(masm, true);
-
- BinaryOpIC::TypeInfo operands_type = Max(left_type_, right_type_);
- if (left_type_ == BinaryOpIC::ODDBALL && right_type_ == BinaryOpIC::ODDBALL) {
- // The OddballStub handles a number and an oddball, not two oddballs.
- operands_type = BinaryOpIC::GENERIC;
- }
- switch (operands_type) {
- case BinaryOpIC::UNINITIALIZED:
- GenerateTypeTransition(masm);
- break;
- case BinaryOpIC::SMI:
- GenerateSmiStub(masm);
- break;
- case BinaryOpIC::INT32:
- GenerateInt32Stub(masm);
- break;
- case BinaryOpIC::HEAP_NUMBER:
- GenerateHeapNumberStub(masm);
- break;
- case BinaryOpIC::ODDBALL:
- GenerateOddballStub(masm);
- break;
- case BinaryOpIC::STRING:
- GenerateStringStub(masm);
- break;
- case BinaryOpIC::GENERIC:
- GenerateGeneric(masm);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
- switch (op_) {
- case Token::ADD:
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
- break;
- case Token::SUB:
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
- break;
- case Token::MUL:
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
- break;
- case Token::DIV:
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
- break;
- case Token::MOD:
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
- break;
- case Token::BIT_OR:
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
- break;
- case Token::BIT_AND:
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
- break;
- case Token::BIT_XOR:
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
- break;
- case Token::SAR:
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
- break;
- case Token::SHR:
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
- break;
- case Token::SHL:
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
- break;
- default:
- UNREACHABLE();
- }
-}
-
-
-#undef __
-
-
-void BinaryOpStub::PrintName(StringStream* stream) {
- const char* op_name = Token::Name(op_);
- const char* overwrite_name;
- switch (mode_) {
- case NO_OVERWRITE: overwrite_name = "Alloc"; break;
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
- default: overwrite_name = "UnknownOverwrite"; break;
- }
- stream->Add("BinaryOpStub_%s_%s_%s+%s",
- op_name,
- overwrite_name,
- BinaryOpIC::GetName(left_type_),
- BinaryOpIC::GetName(right_type_));
-}
-
-
-void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
- ASSERT(left_type_ == BinaryOpIC::STRING || right_type_ == BinaryOpIC::STRING);
- ASSERT(op_ == Token::ADD);
- if (left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING) {
- GenerateBothStringStub(masm);
- return;
- }
- // Try to add arguments as strings, otherwise, transition to the generic
- // BinaryOpIC type.
- GenerateAddStrings(masm);
- GenerateTypeTransition(masm);
-}
-
-
void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
ASSERT(*known_map_ != NULL);
Isolate* isolate = new_object->GetIsolate();
@@ -297,7 +181,8 @@ void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
}
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
+bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+ Isolate* isolate = known_map_->GetIsolate();
Factory* factory = isolate->factory();
Code::Flags flags = Code::ComputeFlags(
static_cast<Code::Kind>(GetCodeKind()),
@@ -311,12 +196,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
flags));
if (probe->IsCode()) {
*code_out = Code::cast(*probe);
-#ifdef DEBUG
- Token::Value cached_op;
- ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
- &cached_op);
- ASSERT(op_ == cached_op);
-#endif
+ ASSERT(op_ == (*code_out)->compare_operation() + Token::EQ);
return true;
}
return false;
@@ -324,33 +204,7 @@ bool ICCompareStub::FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
int ICCompareStub::MinorKey() {
- return OpField::encode(op_ - Token::EQ) |
- LeftStateField::encode(left_) |
- RightStateField::encode(right_) |
- HandlerStateField::encode(state_);
-}
-
-
-void ICCompareStub::DecodeMinorKey(int minor_key,
- CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state,
- Token::Value* op) {
- if (left_state) {
- *left_state =
- static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
- }
- if (right_state) {
- *right_state =
- static_cast<CompareIC::State>(RightStateField::decode(minor_key));
- }
- if (handler_state) {
- *handler_state =
- static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
- if (op) {
- *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
- }
+ return OpField::encode(op_ - Token::EQ) | StateField::encode(state_);
}
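
The restored MinorKey packs two fields: bits 0-2 hold the compare token's
offset from Token::EQ (OpField) and bits 3-7 hold the CompareIC state
(StateField, declared in code-stubs.h below). The same packing spelled
out without the BitField helpers (a sketch, masks written by hand):

    // Equivalent to OpField::encode(op - Token::EQ) |
    // StateField::encode(state): 3 low bits of op offset, 5 of state.
    static int EncodeCompareMinorKey(int op_minus_eq, int state) {
      return (op_minus_eq & 0x7) | ((state & 0x1f) << 3);
    }
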
@@ -359,28 +213,27 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::UNINITIALIZED:
GenerateMiss(masm);
break;
- case CompareIC::SMI:
+ case CompareIC::SMIS:
GenerateSmis(masm);
break;
- case CompareIC::HEAP_NUMBER:
+ case CompareIC::HEAP_NUMBERS:
GenerateHeapNumbers(masm);
break;
- case CompareIC::STRING:
+ case CompareIC::STRINGS:
GenerateStrings(masm);
break;
- case CompareIC::SYMBOL:
+ case CompareIC::SYMBOLS:
GenerateSymbols(masm);
break;
- case CompareIC::OBJECT:
+ case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
case CompareIC::KNOWN_OBJECTS:
ASSERT(*known_map_ != NULL);
GenerateKnownObjects(masm);
break;
- case CompareIC::GENERIC:
- GenerateGeneric(masm);
- break;
+ default:
+ UNREACHABLE();
}
}
diff --git a/deps/v8/src/code-stubs.h b/deps/v8/src/code-stubs.h
index ae113f572..a84384172 100644
--- a/deps/v8/src/code-stubs.h
+++ b/deps/v8/src/code-stubs.h
@@ -141,7 +141,7 @@ class CodeStub BASE_EMBEDDED {
bool CompilingCallsToThisStubIsGCSafe() {
bool is_pregenerated = IsPregenerated();
Code* code = NULL;
- CHECK(!is_pregenerated || FindCodeInCache(&code, Isolate::Current()));
+ CHECK(!is_pregenerated || FindCodeInCache(&code));
return is_pregenerated;
}
@@ -160,7 +160,7 @@ class CodeStub BASE_EMBEDDED {
virtual bool SometimesSetsUpAFrame() { return true; }
// Lookup the code in the (possibly custom) cache.
- bool FindCodeInCache(Code** code_out, Isolate* isolate);
+ bool FindCodeInCache(Code** code_out);
protected:
static bool CanUseFPRegisters();
@@ -202,9 +202,7 @@ class CodeStub BASE_EMBEDDED {
virtual void AddToSpecialCache(Handle<Code> new_object) { }
// Find code in a specialized cache, work is delegated to the specific stub.
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate) {
- return false;
- }
+ virtual bool FindCodeInSpecialCache(Code** code_out) { return false; }
// If a stub uses a special cache override this.
virtual bool UseSpecialCache() { return false; }
@@ -484,132 +482,10 @@ class MathPowStub: public CodeStub {
};
-class BinaryOpStub: public CodeStub {
- public:
- BinaryOpStub(Token::Value op, OverwriteMode mode)
- : op_(op),
- mode_(mode),
- platform_specific_bit_(false),
- left_type_(BinaryOpIC::UNINITIALIZED),
- right_type_(BinaryOpIC::UNINITIALIZED),
- result_type_(BinaryOpIC::UNINITIALIZED) {
- Initialize();
- ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
- }
-
- BinaryOpStub(
- int key,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
- : op_(OpBits::decode(key)),
- mode_(ModeBits::decode(key)),
- platform_specific_bit_(PlatformSpecificBits::decode(key)),
- left_type_(left_type),
- right_type_(right_type),
- result_type_(result_type) { }
-
- static void decode_types_from_minor_key(int minor_key,
- BinaryOpIC::TypeInfo* left_type,
- BinaryOpIC::TypeInfo* right_type,
- BinaryOpIC::TypeInfo* result_type) {
- *left_type =
- static_cast<BinaryOpIC::TypeInfo>(LeftTypeBits::decode(minor_key));
- *right_type =
- static_cast<BinaryOpIC::TypeInfo>(RightTypeBits::decode(minor_key));
- *result_type =
- static_cast<BinaryOpIC::TypeInfo>(ResultTypeBits::decode(minor_key));
- }
-
- static Token::Value decode_op_from_minor_key(int minor_key) {
- return static_cast<Token::Value>(OpBits::decode(minor_key));
- }
-
- enum SmiCodeGenerateHeapNumberResults {
- ALLOW_HEAPNUMBER_RESULTS,
- NO_HEAPNUMBER_RESULTS
- };
-
- private:
- Token::Value op_;
- OverwriteMode mode_;
- bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
-
- // Operand type information determined at runtime.
- BinaryOpIC::TypeInfo left_type_;
- BinaryOpIC::TypeInfo right_type_;
- BinaryOpIC::TypeInfo result_type_;
-
- virtual void PrintName(StringStream* stream);
-
- // Minor key encoding in 19 bits TTTRRRLLLSOOOOOOOMM.
- class ModeBits: public BitField<OverwriteMode, 0, 2> {};
- class OpBits: public BitField<Token::Value, 2, 7> {};
- class PlatformSpecificBits: public BitField<bool, 9, 1> {};
- class LeftTypeBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
- class RightTypeBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
- class ResultTypeBits: public BitField<BinaryOpIC::TypeInfo, 16, 3> {};
-
- Major MajorKey() { return BinaryOp; }
- int MinorKey() {
- return OpBits::encode(op_)
- | ModeBits::encode(mode_)
- | PlatformSpecificBits::encode(platform_specific_bit_)
- | LeftTypeBits::encode(left_type_)
- | RightTypeBits::encode(right_type_)
- | ResultTypeBits::encode(result_type_);
- }
-
-
- // Platform-independent implementation.
- void Generate(MacroAssembler* masm);
- void GenerateCallRuntime(MacroAssembler* masm);
-
- // Platform-independent signature, platform-specific implementation.
- void Initialize();
- void GenerateAddStrings(MacroAssembler* masm);
- void GenerateBothStringStub(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
- void GenerateGenericStub(MacroAssembler* masm);
- void GenerateHeapNumberStub(MacroAssembler* masm);
- void GenerateInt32Stub(MacroAssembler* masm);
- void GenerateLoadArguments(MacroAssembler* masm);
- void GenerateOddballStub(MacroAssembler* masm);
- void GenerateRegisterArgsPush(MacroAssembler* masm);
- void GenerateReturn(MacroAssembler* masm);
- void GenerateSmiStub(MacroAssembler* masm);
- void GenerateStringStub(MacroAssembler* masm);
- void GenerateTypeTransition(MacroAssembler* masm);
- void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
- void GenerateUninitializedStub(MacroAssembler* masm);
-
- // Entirely platform-specific methods are defined as static helper
- // functions in the <arch>/code-stubs-<arch>.cc files.
-
- virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
-
- virtual InlineCacheState GetICState() {
- return BinaryOpIC::ToState(Max(left_type_, right_type_));
- }
-
- virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
- }
-
- friend class CodeGenerator;
-};
-
-
class ICCompareStub: public CodeStub {
public:
- ICCompareStub(Token::Value op,
- CompareIC::State left,
- CompareIC::State right,
- CompareIC::State handler)
- : op_(op),
- left_(left),
- right_(right),
- state_(handler) {
+ ICCompareStub(Token::Value op, CompareIC::State state)
+ : op_(op), state_(state) {
ASSERT(Token::IsCompareOp(op));
}
@@ -617,24 +493,13 @@ class ICCompareStub: public CodeStub {
void set_known_map(Handle<Map> map) { known_map_ = map; }
- static void DecodeMinorKey(int minor_key,
- CompareIC::State* left_state,
- CompareIC::State* right_state,
- CompareIC::State* handler_state,
- Token::Value* op);
-
- static CompareIC::State CompareState(int minor_key) {
- return static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
- }
-
private:
class OpField: public BitField<int, 0, 3> { };
- class LeftStateField: public BitField<int, 3, 3> { };
- class RightStateField: public BitField<int, 6, 3> { };
- class HandlerStateField: public BitField<int, 9, 3> { };
+ class StateField: public BitField<int, 3, 5> { };
virtual void FinishCode(Handle<Code> code) {
- code->set_stub_info(MinorKey());
+ code->set_compare_state(state_);
+ code->set_compare_operation(op_ - Token::EQ);
}
virtual CodeStub::Major MajorKey() { return CompareIC; }
@@ -649,23 +514,117 @@ class ICCompareStub: public CodeStub {
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
void GenerateKnownObjects(MacroAssembler* masm);
- void GenerateGeneric(MacroAssembler* masm);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
virtual void AddToSpecialCache(Handle<Code> new_object);
- virtual bool FindCodeInSpecialCache(Code** code_out, Isolate* isolate);
+ virtual bool FindCodeInSpecialCache(Code** code_out);
virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECTS; }
Token::Value op_;
- CompareIC::State left_;
- CompareIC::State right_;
CompareIC::State state_;
Handle<Map> known_map_;
};
+// Flags that control the compare stub code generation.
+enum CompareFlags {
+ NO_COMPARE_FLAGS = 0,
+ NO_SMI_COMPARE_IN_STUB = 1 << 0,
+ NO_NUMBER_COMPARE_IN_STUB = 1 << 1,
+ CANT_BOTH_BE_NAN = 1 << 2
+};
+
+
+enum NaNInformation {
+ kBothCouldBeNaN,
+ kCantBothBeNaN
+};
+
+
+class CompareStub: public CodeStub {
+ public:
+ CompareStub(Condition cc,
+ bool strict,
+ CompareFlags flags,
+ Register lhs,
+ Register rhs) :
+ cc_(cc),
+ strict_(strict),
+ never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+ include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+ include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+ lhs_(lhs),
+ rhs_(rhs) { }
+
+ CompareStub(Condition cc,
+ bool strict,
+ CompareFlags flags) :
+ cc_(cc),
+ strict_(strict),
+ never_nan_nan_((flags & CANT_BOTH_BE_NAN) != 0),
+ include_number_compare_((flags & NO_NUMBER_COMPARE_IN_STUB) == 0),
+ include_smi_compare_((flags & NO_SMI_COMPARE_IN_STUB) == 0),
+ lhs_(no_reg),
+ rhs_(no_reg) { }
+
+ void Generate(MacroAssembler* masm);
+
+ private:
+ Condition cc_;
+ bool strict_;
+ // Only used for 'equal' comparisons. Tells the stub that we already know
+ // that at least one side of the comparison is not NaN. This allows the
+ // stub to use object identity in the positive case. We ignore it when
+ // generating the minor key for other comparisons to avoid creating more
+ // stubs.
+ bool never_nan_nan_;
+  // Whether to generate the number comparison code in the stub. Stubs
+  // without number comparison code are used when the number comparison
+  // has been inlined, and the stub will be called if one of the
+  // operands is not a number.
+ bool include_number_compare_;
+
+ // Generate the comparison code for two smi operands in the stub.
+ bool include_smi_compare_;
+
+  // Register holding the left hand side of the comparison if the stub gives
+  // a choice, no_reg otherwise.
+  Register lhs_;
+ // Register holding the right hand side of the comparison if the stub gives
+ // a choice, no_reg otherwise.
+ Register rhs_;
+
+ // Encoding of the minor key in 16 bits.
+ class StrictField: public BitField<bool, 0, 1> {};
+ class NeverNanNanField: public BitField<bool, 1, 1> {};
+ class IncludeNumberCompareField: public BitField<bool, 2, 1> {};
+ class IncludeSmiCompareField: public BitField<bool, 3, 1> {};
+ class RegisterField: public BitField<bool, 4, 1> {};
+ class ConditionField: public BitField<int, 5, 11> {};
+
+ Major MajorKey() { return Compare; }
+
+ int MinorKey();
+
+ virtual int GetCodeKind() { return Code::COMPARE_IC; }
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_compare_state(CompareIC::GENERIC);
+ }
+
+ // Branch to the label if the given object isn't a symbol.
+ void BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch);
+
+ // Unfortunately you have to run without snapshots to see most of these
+ // names in the profile since most compare stubs end up in the snapshot.
+ virtual void PrintName(StringStream* stream);
+};
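
CompareFlags is a plain bit set, so call sites compose it by OR-ing the
enumerators and casting back. A hedged usage sketch (the condition value
comes from the platform's constants header, so it is an assumption here):

    // Build a stub that skips the inlined smi path and may assume the
    // operands are never both NaN.
    CompareFlags flags = static_cast<CompareFlags>(
        NO_SMI_COMPARE_IN_STUB | CANT_BOTH_BE_NAN);
    CompareStub stub(eq, /* strict */ false, flags);
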
+
+
class CEntryStub : public CodeStub {
public:
explicit CEntryStub(int result_size,
@@ -1094,9 +1053,6 @@ class ToBooleanStub: public CodeStub {
bool IsEmpty() const { return set_.IsEmpty(); }
bool Contains(Type type) const { return set_.Contains(type); }
- bool ContainsAnyOf(Types types) const {
- return set_.ContainsAnyOf(types.set_);
- }
void Add(Type type) { set_.Add(type); }
byte ToByte() const { return set_.ToIntegral(); }
void Print(StringStream* stream) const;
@@ -1215,8 +1171,6 @@ class ProfileEntryHookStub : public CodeStub {
// non-NULL hook.
static bool SetFunctionEntryHook(FunctionEntryHook entry_hook);
- static bool HasEntryHook() { return entry_hook_ != NULL; }
-
private:
static void EntryHookTrampoline(intptr_t function,
intptr_t stack_pointer);
diff --git a/deps/v8/src/codegen.cc b/deps/v8/src/codegen.cc
index 83ac854a0..0163580e9 100644
--- a/deps/v8/src/codegen.cc
+++ b/deps/v8/src/codegen.cc
@@ -107,7 +107,6 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
- code->set_prologue_offset(info->prologue_offset());
}
return code;
}
diff --git a/deps/v8/src/codegen.h b/deps/v8/src/codegen.h
index 0ac68c2ea..08a777f2a 100644
--- a/deps/v8/src/codegen.h
+++ b/deps/v8/src/codegen.h
@@ -90,7 +90,6 @@ namespace internal {
typedef double (*UnaryMathFunction)(double x);
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type);
-UnaryMathFunction CreateExpFunction();
UnaryMathFunction CreateSqrtFunction();
@@ -104,19 +103,6 @@ class ElementsTransitionGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
};
-
-class SeqStringSetCharGenerator : public AllStatic {
- public:
- static void Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value);
- private:
- DISALLOW_COPY_AND_ASSIGN(SeqStringSetCharGenerator);
-};
-
-
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
diff --git a/deps/v8/src/collection.js b/deps/v8/src/collection.js
index b3c2db72d..d36fe18fa 100644
--- a/deps/v8/src/collection.js
+++ b/deps/v8/src/collection.js
@@ -88,25 +88,6 @@ function SetDelete(key) {
}
-function SetGetSize() {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.size', this]);
- }
- return %SetGetSize(this);
-}
-
-
-function SetClear() {
- if (!IS_SET(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Set.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %SetInitialize(this);
-}
-
-
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
@@ -164,25 +145,6 @@ function MapDelete(key) {
}
-function MapGetSize() {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.size', this]);
- }
- return %MapGetSize(this);
-}
-
-
-function MapClear() {
- if (!IS_MAP(this)) {
- throw MakeTypeError('incompatible_method_receiver',
- ['Map.prototype.clear', this]);
- }
- // Replace the internal table with a new empty table.
- %MapInitialize(this);
-}
-
-
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
@@ -253,22 +215,18 @@ function WeakMapDelete(key) {
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
- InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
- "delete", SetDelete,
- "clear", SetClear
+ "delete", SetDelete
));
// Set up the non-enumerable functions on the Map prototype object.
- InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
- "delete", MapDelete,
- "clear", MapClear
+ "delete", MapDelete
));
// Set up the WeakMap constructor function.
diff --git a/deps/v8/src/compilation-cache.cc b/deps/v8/src/compilation-cache.cc
index 904e84fd6..c0645760b 100644
--- a/deps/v8/src/compilation-cache.cc
+++ b/deps/v8/src/compilation-cache.cc
@@ -98,7 +98,7 @@ void CompilationSubCache::Age() {
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
- Object* undefined = isolate()->heap()->undefined_value();
+ Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
diff --git a/deps/v8/src/compiler.cc b/deps/v8/src/compiler.cc
index 7e4eaa2b3..86374371e 100644
--- a/deps/v8/src/compiler.cc
+++ b/deps/v8/src/compiler.cc
@@ -52,53 +52,57 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE)),
+ : isolate_(script->GetIsolate()),
+ flags_(LanguageModeField::encode(CLASSIC_MODE)),
+ function_(NULL),
+ scope_(NULL),
+ global_scope_(NULL),
script_(script),
- osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ extension_(NULL),
+ pre_parse_data_(NULL),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
+ Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : isolate_(shared_info->GetIsolate()),
+ flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ function_(NULL),
+ scope_(NULL),
+ global_scope_(NULL),
shared_info_(shared_info),
script_(Handle<Script>(Script::cast(shared_info->script()))),
- osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
+ extension_(NULL),
+ pre_parse_data_(NULL),
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
+ Initialize(BASE);
}
CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
- : flags_(LanguageModeField::encode(CLASSIC_MODE) | IsLazy::encode(true)),
+ : isolate_(closure->GetIsolate()),
+ flags_(LanguageModeField::encode(CLASSIC_MODE) |
+ IsLazy::encode(true)),
+ function_(NULL),
+ scope_(NULL),
+ global_scope_(NULL),
closure_(closure),
shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
script_(Handle<Script>(Script::cast(shared_info_->script()))),
+ extension_(NULL),
+ pre_parse_data_(NULL),
context_(closure->context()),
- osr_ast_id_(BailoutId::None()) {
- Initialize(zone);
-}
-
-
-void CompilationInfo::Initialize(Zone* zone) {
- isolate_ = script_->GetIsolate();
- function_ = NULL;
- scope_ = NULL;
- global_scope_ = NULL;
- extension_ = NULL;
- pre_parse_data_ = NULL;
- zone_ = zone;
- deferred_handles_ = NULL;
- prologue_offset_ = kPrologueOffsetNotSet;
- mode_ = V8::UseCrankshaft() ? BASE : NONOPT;
- if (script_->type()->value() == Script::TYPE_NATIVE) {
- MarkAsNative();
- }
- if (!shared_info_.is_null()) {
- ASSERT(language_mode() == CLASSIC_MODE);
- SetLanguageMode(shared_info_->language_mode());
- }
- set_bailout_reason("unknown");
+ osr_ast_id_(BailoutId::None()),
+ zone_(zone),
+ deferred_handles_(NULL) {
+ Initialize(BASE);
}
@@ -190,11 +194,6 @@ void OptimizingCompiler::RecordOptimizationStats() {
code_size,
compilation_time);
}
- if (FLAG_hydrogen_stats) {
- HStatistics::Instance()->IncrementSubtotals(time_taken_to_create_graph_,
- time_taken_to_optimize_,
- time_taken_to_codegen_);
- }
}
@@ -285,6 +284,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
// doesn't have deoptimization support. Alternatively, we may decide to
// run the full code generator to get a baseline for the compile-time
// performance of the hydrogen-based compiler.
+ Timer t(this, &time_taken_to_create_graph_);
bool should_recompile = !info()->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
@@ -324,8 +324,7 @@ OptimizingCompiler::Status OptimizingCompiler::CreateGraph() {
oracle_ = new(info()->zone()) TypeFeedbackOracle(
code, native_context, info()->isolate(), info()->zone());
graph_builder_ = new(info()->zone()) HGraphBuilder(info(), oracle_);
-
- Timer t(this, &time_taken_to_create_graph_);
+ HPhase phase(HPhase::kTotal);
graph_ = graph_builder_->CreateGraph();
if (info()->isolate()->has_pending_exception()) {
@@ -372,17 +371,15 @@ OptimizingCompiler::Status OptimizingCompiler::OptimizeGraph() {
OptimizingCompiler::Status OptimizingCompiler::GenerateAndInstallCode() {
ASSERT(last_status() == SUCCEEDED);
- { // Scope for timer.
- Timer timer(this, &time_taken_to_codegen_);
- ASSERT(chunk_ != NULL);
- ASSERT(graph_ != NULL);
- Handle<Code> optimized_code = chunk_->Codegen();
- if (optimized_code.is_null()) {
- info()->set_bailout_reason("code generation failed");
- return AbortOptimization();
- }
- info()->SetCode(optimized_code);
+ Timer timer(this, &time_taken_to_codegen_);
+ ASSERT(chunk_ != NULL);
+ ASSERT(graph_ != NULL);
+ Handle<Code> optimized_code = chunk_->Codegen();
+ if (optimized_code.is_null()) {
+ info()->set_bailout_reason("code generation failed");
+ return AbortOptimization();
}
+ info()->SetCode(optimized_code);
RecordOptimizationStats();
return SetLastStatus(SUCCEEDED);
}
@@ -393,8 +390,6 @@ static bool GenerateCode(CompilationInfo* info) {
!info->IsCompilingForDebugging() &&
info->IsOptimizing();
if (is_optimizing) {
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
return MakeCrankshaftCode(info);
} else {
if (info->IsOptimizing()) {
@@ -402,8 +397,6 @@ static bool GenerateCode(CompilationInfo* info) {
// BASE or NONOPT.
info->DisableOptimization();
}
- Logger::TimerEventScope timer(
- info->isolate(), Logger::TimerEventScope::v8_compile_full_code);
return FullCodeGenerator::MakeCode(info);
}
}
@@ -439,9 +432,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
ASSERT(!isolate->native_context().is_null());
Handle<Script> script = info->script();
- // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
- FixedArray* array = isolate->native_context()->embedder_data();
- script->set_context_data(array->get(0));
+ script->set_context_data((*isolate->native_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@@ -697,7 +688,7 @@ static bool InstallFullCode(CompilationInfo* info) {
Handle<ScopeInfo> scope_info =
ScopeInfo::Create(info->scope(), info->zone());
shared->set_scope_info(*scope_info);
- shared->ReplaceCode(*code);
+ shared->set_code(*code);
if (!function.is_null()) {
function->ReplaceCode(*code);
ASSERT(!function->IsOptimized());
@@ -850,11 +841,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
ASSERT(closure->IsMarkedForParallelRecompilation());
Isolate* isolate = closure->GetIsolate();
- // Here we prepare compile data for the parallel recompilation thread, but
- // this still happens synchronously and interrupts execution.
- Logger::TimerEventScope timer(
- isolate, Logger::TimerEventScope::v8_recompile_synchronous);
-
if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
if (FLAG_trace_parallel_recompilation) {
PrintF(" ** Compilation queue, will retry opting on next run.\n");
@@ -863,7 +849,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(closure));
- VMState state(isolate, PARALLEL_COMPILER);
+ VMState state(isolate, PARALLEL_COMPILER_PROLOGUE);
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
@@ -874,10 +860,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
{
CompilationHandleScope handle_scope(*info);
- if (!FLAG_manual_parallel_recompilation &&
- InstallCodeFromOptimizedCodeMap(*info)) {
- return;
- }
+ if (InstallCodeFromOptimizedCodeMap(*info)) return;
if (ParserApi::Parse(*info, kNoParsingFlags)) {
LanguageMode language_mode = info->function()->language_mode();
@@ -911,10 +894,6 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
void Compiler::InstallOptimizedCode(OptimizingCompiler* optimizing_compiler) {
SmartPointer<CompilationInfo> info(optimizing_compiler->info());
- Isolate* isolate = info->isolate();
- VMState state(isolate, PARALLEL_COMPILER);
- Logger::TimerEventScope timer(
- isolate, Logger::TimerEventScope::v8_recompile_synchronous);
// If crankshaft succeeded, install the optimized code else install
// the unoptimized code.
OptimizingCompiler::Status status = optimizing_compiler->last_status();
diff --git a/deps/v8/src/compiler.h b/deps/v8/src/compiler.h
index 653d5f124..af9459566 100644
--- a/deps/v8/src/compiler.h
+++ b/deps/v8/src/compiler.h
@@ -35,8 +35,6 @@
namespace v8 {
namespace internal {
-static const int kPrologueOffsetNotSet = -1;
-
class ScriptDataImpl;
// CompilationInfo encapsulates some information known at compile time. It
@@ -188,16 +186,6 @@ class CompilationInfo {
const char* bailout_reason() const { return bailout_reason_; }
void set_bailout_reason(const char* reason) { bailout_reason_ = reason; }
- int prologue_offset() const {
- ASSERT_NE(kPrologueOffsetNotSet, prologue_offset_);
- return prologue_offset_;
- }
-
- void set_prologue_offset(int prologue_offset) {
- ASSERT_EQ(kPrologueOffsetNotSet, prologue_offset_);
- prologue_offset_ = prologue_offset;
- }
-
private:
Isolate* isolate_;
@@ -212,7 +200,18 @@ class CompilationInfo {
NONOPT
};
- void Initialize(Zone* zone);
+ void Initialize(Mode mode) {
+ mode_ = V8::UseCrankshaft() ? mode : NONOPT;
+ ASSERT(!script_.is_null());
+ if (script_->type()->value() == Script::TYPE_NATIVE) {
+ MarkAsNative();
+ }
+ if (!shared_info_.is_null()) {
+ ASSERT(language_mode() == CLASSIC_MODE);
+ SetLanguageMode(shared_info_->language_mode());
+ }
+ set_bailout_reason("unknown");
+ }
void SetMode(Mode mode) {
ASSERT(V8::UseCrankshaft());
@@ -286,8 +285,6 @@ class CompilationInfo {
const char* bailout_reason_;
- int prologue_offset_;
-
DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
};
@@ -296,8 +293,6 @@ class CompilationInfo {
// Zone on construction and deallocates it on exit.
class CompilationInfoWithZone: public CompilationInfo {
public:
- INLINE(void* operator new(size_t size)) { return Malloced::New(size); }
-
explicit CompilationInfoWithZone(Handle<Script> script)
: CompilationInfo(script, &zone_),
zone_(script->GetIsolate()),
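
The prologue_offset accessors being removed implement a set-once field guarded by assertions: reading before the value is set, or setting it twice, trips an ASSERT. A standalone sketch of that idiom, with kNotSet standing in for kPrologueOffsetNotSet:

    #include <cassert>

    class PrologueOffset {
     public:
      static const int kNotSet = -1;
      int get() const {
        assert(offset_ != kNotSet);  // reading an unset offset is an error
        return offset_;
      }
      void set(int offset) {
        assert(offset_ == kNotSet);  // the offset may only be set once
        offset_ = offset;
      }
     private:
      int offset_ = kNotSet;
    };
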
diff --git a/deps/v8/src/contexts.cc b/deps/v8/src/contexts.cc
index 4cb52d3a6..93c979540 100644
--- a/deps/v8/src/contexts.cc
+++ b/deps/v8/src/contexts.cc
@@ -55,15 +55,6 @@ JSBuiltinsObject* Context::builtins() {
}
-Context* Context::global_context() {
- Context* current = this;
- while (!current->IsGlobalContext()) {
- current = current->previous();
- }
- return current;
-}
-
-
Context* Context::native_context() {
// Fast case: the global object for this context has been set. In
// that case, the global object has a direct pointer to the global
@@ -192,10 +183,6 @@ Handle<Object> Context::Lookup(Handle<String> name,
? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
IMMUTABLE_IS_INITIALIZED_HARMONY;
break;
- case MODULE:
- *attributes = READ_ONLY;
- *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
- break;
case DYNAMIC:
case DYNAMIC_GLOBAL:
case DYNAMIC_LOCAL:
@@ -264,6 +251,8 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
}
+ CHECK(function->next_function_link()->IsUndefined());
+
// Check that the context belongs to the weak native contexts list.
bool found = false;
Object* context = GetHeap()->native_contexts_list();
@@ -276,16 +265,6 @@ void Context::AddOptimizedFunction(JSFunction* function) {
}
CHECK(found);
#endif
-
- // If the function link field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (!function->next_function_link()->IsUndefined()) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(function);
- }
-
- ASSERT(function->next_function_link()->IsUndefined());
-
function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
set(OPTIMIZED_FUNCTIONS_LIST, function);
}
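
The deleted global_context() is a plain chain walk: follow previous() links outward until the predicate holds. A standalone restatement (Node stands in for Context, and the chain is assumed to always terminate in a node with is_global set, just as context chains end in a global context):

    struct Node {
      Node* previous;
      bool is_global;
    };

    // Walk outward until the global node; mirrors the removed loop.
    Node* FindGlobal(Node* current) {
      while (!current->is_global) {
        current = current->previous;
      }
      return current;
    }
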
diff --git a/deps/v8/src/contexts.h b/deps/v8/src/contexts.h
index 745ba6534..28e4af536 100644
--- a/deps/v8/src/contexts.h
+++ b/deps/v8/src/contexts.h
@@ -152,7 +152,7 @@ enum BindingFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
- V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
+ V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
error_message_for_code_gen_from_strings) \
@@ -161,9 +161,7 @@ enum BindingFlags {
V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
- V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
- V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
- V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
+ V(PROXY_ENUMERATE, JSFunction, proxy_enumerate) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@@ -283,16 +281,14 @@ class Context: public FixedArray {
OPAQUE_REFERENCE_FUNCTION_INDEX,
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
- EMBEDDER_DATA_INDEX,
+ CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX,
TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX,
DERIVED_HAS_TRAP_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
- PROXY_ENUMERATE_INDEX,
- OBSERVERS_NOTIFY_CHANGE_INDEX,
- OBSERVERS_DELIVER_CHANGES_INDEX,
+ PROXY_ENUMERATE,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@@ -345,19 +341,12 @@ class Context: public FixedArray {
// The builtins object.
JSBuiltinsObject* builtins();
- // Get the innermost global context by traversing the context chain.
- Context* global_context();
-
// Compute the native context by traversing the context chain.
Context* native_context();
- // Predicates for context types. IsNativeContext is also defined on Object
+ // Predicates for context types. IsNativeContext is defined on Object
// because we frequently have to know if arbitrary objects are native
// contexts.
- bool IsNativeContext() {
- Map* map = this->map();
- return map == map->GetHeap()->native_context_map();
- }
bool IsFunctionContext() {
Map* map = this->map();
return map == map->GetHeap()->function_context_map();
@@ -457,9 +446,6 @@ class Context: public FixedArray {
static bool IsBootstrappingOrValidParentContext(Object* object, Context* kid);
static bool IsBootstrappingOrGlobalObject(Object* object);
#endif
-
- STATIC_CHECK(kHeaderSize == Internals::kContextHeaderSize);
- STATIC_CHECK(EMBEDDER_DATA_INDEX == Internals::kContextEmbedderDataIndex);
};
} } // namespace v8::internal
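
The V(...) lists above are X-macros: a single slot list is expanded several times to generate the enum of indices, the typed accessors, and so on, keeping them in sync by construction. A minimal standalone example of the technique (the slots here are illustrative, not real context fields):

    #define FOR_EACH_SLOT(V) \
      V(CLOSURE_INDEX, closure) \
      V(EXTENSION_INDEX, extension)

    enum SlotIndex {
    #define DECLARE_INDEX(index, name) index,
      FOR_EACH_SLOT(DECLARE_INDEX)
    #undef DECLARE_INDEX
      kSlotCount
    };

    static const char* kSlotNames[] = {
    #define DECLARE_NAME(index, name) #name,
      FOR_EACH_SLOT(DECLARE_NAME)
    #undef DECLARE_NAME
    };
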
diff --git a/deps/v8/src/counters.cc b/deps/v8/src/counters.cc
index 6d453d6aa..811c0aa2e 100644
--- a/deps/v8/src/counters.cc
+++ b/deps/v8/src/counters.cc
@@ -77,7 +77,7 @@ void* Histogram::CreateHistogram() const {
// Start the timer.
void HistogramTimer::Start() {
- if (histogram_.Enabled() || FLAG_log_internal_timer_events) {
+ if (histogram_.Enabled()) {
stop_time_ = 0;
start_time_ = OS::Ticks();
}
@@ -87,14 +87,11 @@ void HistogramTimer::Start() {
void HistogramTimer::Stop() {
if (histogram_.Enabled()) {
stop_time_ = OS::Ticks();
+
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
histogram_.AddSample(milliseconds);
}
- if (FLAG_log_internal_timer_events) {
- LOG(Isolate::Current(),
- TimerEvent(histogram_.name_, start_time_, OS::Ticks()));
- }
}
} } // namespace v8::internal
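
OS::Ticks() returns a microsecond count, so the sample computation above is just a delta scaled down to whole milliseconds. Standalone restatement of that arithmetic:

    #include <cstdint>

    // Delta between two microsecond tick values, in whole milliseconds.
    int ElapsedMilliseconds(int64_t start_us, int64_t stop_us) {
      return static_cast<int>((stop_us - start_us) / 1000);
    }
    // e.g. ElapsedMilliseconds(5000, 12500) == 7
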
diff --git a/deps/v8/src/d8.cc b/deps/v8/src/d8.cc
index 8233f861e..b3b1bb8a1 100644
--- a/deps/v8/src/d8.cc
+++ b/deps/v8/src/d8.cc
@@ -67,62 +67,6 @@
namespace v8 {
-
-static Handle<Value> Throw(const char* message) {
- return ThrowException(String::New(message));
-}
-
-
-// TODO(rossberg): should replace these by proper uses of HasInstance,
-// once we figure out a good way to make the templates global.
-const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
-const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
-
-
-#define FOR_EACH_SYMBOL(V) \
- V(ArrayBuffer, "ArrayBuffer") \
- V(ArrayBufferMarkerPropName, kArrayBufferMarkerPropName) \
- V(ArrayMarkerPropName, kArrayMarkerPropName) \
- V(buffer, "buffer") \
- V(byteLength, "byteLength") \
- V(byteOffset, "byteOffset") \
- V(BYTES_PER_ELEMENT, "BYTES_PER_ELEMENT") \
- V(length, "length")
-
-
-class Symbols {
- public:
- explicit Symbols(Isolate* isolate) : isolate_(isolate) {
- HandleScope scope;
-#define INIT_SYMBOL(name, value) \
- name##_ = Persistent<String>::New(String::NewSymbol(value));
- FOR_EACH_SYMBOL(INIT_SYMBOL)
-#undef INIT_SYMBOL
- isolate->SetData(this);
- }
-
- ~Symbols() {
-#define DISPOSE_SYMBOL(name, value) name##_.Dispose();
- FOR_EACH_SYMBOL(DISPOSE_SYMBOL)
-#undef DISPOSE_SYMBOL
- isolate_->SetData(NULL); // Not really needed, just to be sure...
- }
-
-#define DEFINE_SYMBOL_GETTER(name, value) \
- static Persistent<String> name(Isolate* isolate) { \
- return reinterpret_cast<Symbols*>(isolate->GetData())->name##_; \
- }
- FOR_EACH_SYMBOL(DEFINE_SYMBOL_GETTER)
-#undef DEFINE_SYMBOL_GETTER
-
- private:
- Isolate* isolate_;
-#define DEFINE_MEMBER(name, value) Persistent<String> name##_;
- FOR_EACH_SYMBOL(DEFINE_MEMBER)
-#undef DEFINE_MEMBER
-};
-
-
LineEditor *LineEditor::first_ = NULL;
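
The Symbols class deleted above exists to create each frequently used string once per isolate and then reuse the persistent handle, rather than re-running String::New / String::NewSymbol on every property access. The caching idea, sketched without the V8 handle types (a plain map stands in for the persistent members):

    #include <string>
    #include <unordered_map>

    class SymbolCache {
     public:
      // Returns a long-lived string for |literal|, creating it on first
      // use only -- the analogue of the cached Persistent<String>.
      const std::string& Get(const char* literal) {
        auto it = cache_.find(literal);
        if (it == cache_.end()) {
          it = cache_.emplace(literal, std::string(literal)).first;
        }
        return it->second;
      }
     private:
      std::unordered_map<std::string, std::string> cache_;
    };
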
@@ -148,17 +92,17 @@ LineEditor* LineEditor::Get() {
class DumbLineEditor: public LineEditor {
public:
- explicit DumbLineEditor(Isolate* isolate)
- : LineEditor(LineEditor::DUMB, "dumb"), isolate_(isolate) { }
+ DumbLineEditor() : LineEditor(LineEditor::DUMB, "dumb") { }
virtual Handle<String> Prompt(const char* prompt);
- private:
- Isolate* isolate_;
};
+static DumbLineEditor dumb_line_editor;
+
+
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
- return Shell::ReadFromStdin(isolate_);
+ return Shell::ReadFromStdin();
}
@@ -171,6 +115,7 @@ i::Mutex* Shell::context_mutex_(i::OS::CreateMutex());
Persistent<Context> Shell::utility_context_;
#endif // V8_SHARED
+LineEditor* Shell::console = NULL;
Persistent<Context> Shell::evaluation_context_;
ShellOptions Shell::options;
const char* Shell::kPrompt = "d8> ";
@@ -287,17 +232,17 @@ Handle<Value> Shell::DisableProfiler(const Arguments& args) {
Handle<Value> Shell::Read(const Arguments& args) {
String::Utf8Value file(args[0]);
if (*file == NULL) {
- return Throw("Error loading file");
+ return ThrowException(String::New("Error loading file"));
}
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return Throw("Error loading file");
+ return ThrowException(String::New("Error loading file"));
}
return source;
}
-Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
+Handle<String> Shell::ReadFromStdin() {
static const int kBufferSize = 256;
char buffer[kBufferSize];
Handle<String> accumulator = String::New("");
@@ -308,7 +253,7 @@ Handle<String> Shell::ReadFromStdin(Isolate* isolate) {
// If fgets gets an error, just give up.
char* input = NULL;
{ // Release lock for blocking input.
- Unlocker unlock(isolate);
+ Unlocker unlock(Isolate::GetCurrent());
input = fgets(buffer, kBufferSize, stdin);
}
if (input == NULL) return Handle<String>();
@@ -332,14 +277,14 @@ Handle<Value> Shell::Load(const Arguments& args) {
HandleScope handle_scope;
String::Utf8Value file(args[i]);
if (*file == NULL) {
- return Throw("Error loading file");
+ return ThrowException(String::New("Error loading file"));
}
- Handle<String> source = ReadFile(args.GetIsolate(), *file);
+ Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
- return Throw("Error loading file");
+ return ThrowException(String::New("Error loading file"));
}
if (!ExecuteString(source, String::New(*file), false, true)) {
- return Throw("Error executing file");
+ return ThrowException(String::New("Error executing file"));
}
}
return Undefined();
@@ -369,7 +314,7 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
if (try_catch->HasCaught()) return 0;
if (raw_value < 0) {
- Throw("Array length must not be negative.");
+ ThrowException(String::New("Array length must not be negative."));
return 0;
}
@@ -378,27 +323,33 @@ static int32_t convertToUint(Local<Value> value_in, TryCatch* try_catch) {
ASSERT(kMaxLength == i::ExternalArray::kMaxLength);
#endif // V8_SHARED
if (raw_value > static_cast<int32_t>(kMaxLength)) {
- Throw("Array length exceeds maximum length.");
+ ThrowException(
+ String::New("Array length exceeds maximum length."));
}
return raw_value;
}
-Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
- Handle<Object> buffer,
+// TODO(rossberg): should replace these by proper uses of HasInstance,
+// once we figure out a good way to make the templates global.
+const char kArrayBufferMarkerPropName[] = "d8::_is_array_buffer_";
+const char kArrayMarkerPropName[] = "d8::_is_typed_array_";
+
+
+Handle<Value> Shell::CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t length) {
static const int32_t kMaxSize = 0x7fffffff;
// Make sure the total size fits into a (signed) int.
if (length < 0 || length > kMaxSize) {
- return Throw("ArrayBuffer exceeds maximum size (2G)");
+ return ThrowException(String::New("ArrayBuffer exceeds maximum size (2G)"));
}
uint8_t* data = new uint8_t[length];
if (data == NULL) {
- return Throw("Memory allocation failed");
+ return ThrowException(String::New("Memory allocation failed"));
}
memset(data, 0, length);
- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_array = Persistent<Object>::New(buffer);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
@@ -406,7 +357,7 @@ Handle<Value> Shell::CreateExternalArrayBuffer(Isolate* isolate,
buffer->SetIndexedPropertiesToExternalArrayData(
data, v8::kExternalByteArray, length);
- buffer->Set(Symbols::byteLength(isolate), Int32::New(length), ReadOnly);
+ buffer->Set(String::New("byteLength"), Int32::New(length), ReadOnly);
return buffer;
}
@@ -422,18 +373,18 @@ Handle<Value> Shell::ArrayBuffer(const Arguments& args) {
}
if (args.Length() == 0) {
- return Throw("ArrayBuffer constructor must have one argument");
+ return ThrowException(
+ String::New("ArrayBuffer constructor must have one argument"));
}
TryCatch try_catch;
int32_t length = convertToUint(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
- return CreateExternalArrayBuffer(args.GetIsolate(), args.This(), length);
+ return CreateExternalArrayBuffer(args.This(), length);
}
-Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
- Handle<Object> array,
+Handle<Object> Shell::CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
@@ -449,13 +400,12 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
array->SetIndexedPropertiesToExternalArrayData(
static_cast<uint8_t*>(data) + byteOffset, type, length);
- array->SetHiddenValue(Symbols::ArrayMarkerPropName(isolate),
- Int32::New(type));
- array->Set(Symbols::byteLength(isolate), Int32::New(byteLength), ReadOnly);
- array->Set(Symbols::byteOffset(isolate), Int32::New(byteOffset), ReadOnly);
- array->Set(Symbols::length(isolate), Int32::New(length), ReadOnly);
- array->Set(Symbols::BYTES_PER_ELEMENT(isolate), Int32::New(element_size));
- array->Set(Symbols::buffer(isolate), buffer, ReadOnly);
+ array->SetHiddenValue(String::New(kArrayMarkerPropName), Int32::New(type));
+ array->Set(String::New("byteLength"), Int32::New(byteLength), ReadOnly);
+ array->Set(String::New("byteOffset"), Int32::New(byteOffset), ReadOnly);
+ array->Set(String::New("length"), Int32::New(length), ReadOnly);
+ array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
+ array->Set(String::New("buffer"), buffer, ReadOnly);
return array;
}
@@ -464,7 +414,6 @@ Handle<Object> Shell::CreateExternalArray(Isolate* isolate,
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
int32_t element_size) {
- Isolate* isolate = args.GetIsolate();
if (!args.IsConstructCall()) {
Handle<Value>* rec_args = new Handle<Value>[args.Length()];
for (int i = 0; i < args.Length(); ++i) rec_args[i] = args[i];
@@ -490,15 +439,16 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
int32_t byteOffset;
bool init_from_array = false;
if (args.Length() == 0) {
- return Throw("Array constructor must have at least one argument");
+ return ThrowException(
+ String::New("Array constructor must have at least one argument"));
}
if (args[0]->IsObject() &&
!args[0]->ToObject()->GetHiddenValue(
- Symbols::ArrayBufferMarkerPropName(isolate)).IsEmpty()) {
+ String::New(kArrayBufferMarkerPropName)).IsEmpty()) {
// Construct from ArrayBuffer.
buffer = args[0]->ToObject();
int32_t bufferLength =
- convertToUint(buffer->Get(Symbols::byteLength(isolate)), &try_catch);
+ convertToUint(buffer->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() < 2 || args[1]->IsUndefined()) {
@@ -507,10 +457,11 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = convertToUint(args[1], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (byteOffset > bufferLength) {
- return Throw("byteOffset out of bounds");
+ return ThrowException(String::New("byteOffset out of bounds"));
}
if (byteOffset % element_size != 0) {
- return Throw("byteOffset must be multiple of element size");
+ return ThrowException(
+ String::New("byteOffset must be multiple of element size"));
}
}
@@ -518,22 +469,23 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteLength = bufferLength - byteOffset;
length = byteLength / element_size;
if (byteLength % element_size != 0) {
- return Throw("buffer size must be multiple of element size");
+ return ThrowException(
+ String::New("buffer size must be multiple of element size"));
}
} else {
length = convertToUint(args[2], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
byteLength = length * element_size;
if (byteOffset + byteLength > bufferLength) {
- return Throw("length out of bounds");
+ return ThrowException(String::New("length out of bounds"));
}
}
} else {
if (args[0]->IsObject() &&
- args[0]->ToObject()->Has(Symbols::length(isolate))) {
+ args[0]->ToObject()->Has(String::New("length"))) {
// Construct from array.
length = convertToUint(
- args[0]->ToObject()->Get(Symbols::length(isolate)), &try_catch);
+ args[0]->ToObject()->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
init_from_array = true;
} else {
@@ -545,7 +497,7 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
byteOffset = 0;
Handle<Object> global = Context::GetCurrent()->Global();
- Handle<Value> array_buffer = global->Get(Symbols::ArrayBuffer(isolate));
+ Handle<Value> array_buffer = global->Get(String::New("ArrayBuffer"));
ASSERT(!try_catch.HasCaught() && array_buffer->IsFunction());
Handle<Value> buffer_args[] = { Uint32::New(byteLength) };
Handle<Value> result = Handle<Function>::Cast(array_buffer)->NewInstance(
@@ -554,9 +506,8 @@ Handle<Value> Shell::CreateExternalArray(const Arguments& args,
buffer = result->ToObject();
}
- Handle<Object> array =
- CreateExternalArray(isolate, args.This(), buffer, type, length,
- byteLength, byteOffset, element_size);
+ Handle<Object> array = CreateExternalArray(
+ args.This(), buffer, type, length, byteLength, byteOffset, element_size);
if (init_from_array) {
Handle<Object> init = args[0]->ToObject();
@@ -571,23 +522,25 @@ Handle<Value> Shell::ArrayBufferSlice(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return Throw("'slice' invoked on non-object receiver");
+ return ThrowException(
+ String::New("'slice' invoked on non-object receiver"));
}
- Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate));
+ self->GetHiddenValue(String::New(kArrayBufferMarkerPropName));
if (marker.IsEmpty()) {
- return Throw("'slice' invoked on wrong receiver type");
+ return ThrowException(
+ String::New("'slice' invoked on wrong receiver type"));
}
int32_t length =
- convertToUint(self->Get(Symbols::byteLength(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("byteLength")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return Throw("'slice' must have at least one argument");
+ return ThrowException(
+ String::New("'slice' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -626,31 +579,32 @@ Handle<Value> Shell::ArraySubArray(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return Throw("'subarray' invoked on non-object receiver");
+ return ThrowException(
+ String::New("'subarray' invoked on non-object receiver"));
}
- Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
- return Throw("'subarray' invoked on wrong receiver type");
+ return ThrowException(
+ String::New("'subarray' invoked on wrong receiver type"));
}
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t length =
- convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return Throw("'subarray' must have at least one argument");
+ return ThrowException(
+ String::New("'subarray' must have at least one argument"));
}
int32_t begin = convertToInt(args[0], &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
@@ -685,33 +639,35 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
TryCatch try_catch;
if (!args.This()->IsObject()) {
- return Throw("'set' invoked on non-object receiver");
+ return ThrowException(
+ String::New("'set' invoked on non-object receiver"));
}
- Isolate* isolate = args.GetIsolate();
Local<Object> self = args.This();
- Local<Value> marker =
- self->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate));
+ Local<Value> marker = self->GetHiddenValue(String::New(kArrayMarkerPropName));
if (marker.IsEmpty()) {
- return Throw("'set' invoked on wrong receiver type");
+ return ThrowException(
+ String::New("'set' invoked on wrong receiver type"));
}
int32_t length =
- convertToUint(self->Get(Symbols::length(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t element_size =
- convertToUint(self->Get(Symbols::BYTES_PER_ELEMENT(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (args.Length() == 0) {
- return Throw("'set' must have at least one argument");
+ return ThrowException(
+ String::New("'set' must have at least one argument"));
}
if (!args[0]->IsObject() ||
- !args[0]->ToObject()->Has(Symbols::length(isolate))) {
- return Throw("'set' invoked with non-array argument");
+ !args[0]->ToObject()->Has(String::New("length"))) {
+ return ThrowException(
+ String::New("'set' invoked with non-array argument"));
}
Handle<Object> source = args[0]->ToObject();
int32_t source_length =
- convertToUint(source->Get(Symbols::length(isolate)), &try_catch);
+ convertToUint(source->Get(String::New("length")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t offset;
@@ -722,32 +678,31 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (offset + source_length > length) {
- return Throw("offset or source length out of bounds");
+ return ThrowException(String::New("offset or source length out of bounds"));
}
int32_t source_element_size;
- if (source->GetHiddenValue(Symbols::ArrayMarkerPropName(isolate)).IsEmpty()) {
+ if (source->GetHiddenValue(String::New(kArrayMarkerPropName)).IsEmpty()) {
source_element_size = 0;
} else {
source_element_size =
- convertToUint(source->Get(Symbols::BYTES_PER_ELEMENT(isolate)),
- &try_catch);
+ convertToUint(source->Get(String::New("BYTES_PER_ELEMENT")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
}
if (element_size == source_element_size &&
self->GetConstructor()->StrictEquals(source->GetConstructor())) {
// Use memmove on the array buffers.
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(Symbols::buffer(isolate))->ToObject();
+ source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
uint8_t* dest = byteOffset + offset * element_size + static_cast<uint8_t*>(
@@ -763,10 +718,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
}
} else {
// Need to copy element-wise to make the right conversions.
- Handle<Object> buffer = self->Get(Symbols::buffer(isolate))->ToObject();
+ Handle<Object> buffer = self->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
Handle<Object> source_buffer =
- source->Get(Symbols::buffer(isolate))->ToObject();
+ source->Get(String::New("buffer"))->ToObject();
if (try_catch.HasCaught()) return try_catch.ReThrow();
if (buffer->StrictEquals(source_buffer)) {
@@ -774,10 +729,10 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
// This gets a bit tricky in the case of different element sizes
// (which, of course, is extremely unlikely to ever occur in practice).
int32_t byteOffset =
- convertToUint(self->Get(Symbols::byteOffset(isolate)), &try_catch);
+ convertToUint(self->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
int32_t source_byteOffset =
- convertToUint(source->Get(Symbols::byteOffset(isolate)), &try_catch);
+ convertToUint(source->Get(String::New("byteOffset")), &try_catch);
if (try_catch.HasCaught()) return try_catch.ReThrow();
// Copy as much as we can from left to right.
@@ -823,9 +778,8 @@ Handle<Value> Shell::ArraySet(const Arguments& args) {
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
HandleScope scope;
- Isolate* isolate = Isolate::GetCurrent();
int32_t length =
- object->ToObject()->Get(Symbols::byteLength(isolate))->Uint32Value();
+ object->ToObject()->Get(String::New("byteLength"))->Uint32Value();
V8::AdjustAmountOfExternalAllocatedMemory(-length);
delete[] static_cast<uint8_t*>(data);
object.Dispose();
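
The weak callback above is the standard external-backing-store teardown: once the GC finds the wrapper dead, the externally allocated byte count is credited back, the raw buffer is freed, and the persistent handle is disposed. The shape of it, minus the V8 types (external_memory_bytes is a hypothetical stand-in for V8's external-allocation counter):

    #include <cstdint>

    static int64_t external_memory_bytes = 0;

    void OnWrapperCollected(uint8_t* data, int32_t byte_length) {
      // ~ V8::AdjustAmountOfExternalAllocatedMemory(-length)
      external_memory_bytes -= byte_length;
      delete[] data;  // buffer was created with new uint8_t[length]
    }
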
@@ -1191,7 +1145,7 @@ Handle<FunctionTemplate> Shell::CreateArrayTemplate(InvocationCallback fun) {
}
-Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
+Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@@ -1211,7 +1165,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
// Bind the handlers for external arrays.
PropertyAttribute attr =
static_cast<PropertyAttribute>(ReadOnly | DontDelete);
- global_template->Set(Symbols::ArrayBuffer(isolate),
+ global_template->Set(String::New("ArrayBuffer"),
CreateArrayBufferTemplate(ArrayBuffer), attr);
global_template->Set(String::New("Int8Array"),
CreateArrayTemplate(Int8Array), attr);
@@ -1248,7 +1202,7 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
}
-void Shell::Initialize(Isolate* isolate) {
+void Shell::Initialize() {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
@@ -1269,15 +1223,12 @@ void Shell::Initialize(Isolate* isolate) {
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
#endif // V8_SHARED
-}
-
-
-void Shell::InitializeDebugger(Isolate* isolate) {
if (options.test_shell) return;
+
#ifndef V8_SHARED
Locker lock;
HandleScope scope;
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -1291,13 +1242,13 @@ void Shell::InitializeDebugger(Isolate* isolate) {
}
-Persistent<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
+Persistent<Context> Shell::CreateEvaluationContext() {
#ifndef V8_SHARED
// This needs to be a critical section; creating the context is not thread-safe
i::ScopedLock lock(context_mutex_);
#endif // V8_SHARED
// Initialize the global objects
- Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
+ Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
Persistent<Context> context = Context::New(NULL, global_template);
ASSERT(!context.IsEmpty());
Context::Scope scope(context);
@@ -1343,6 +1294,7 @@ int CompareKeys(const void* a, const void* b) {
void Shell::OnExit() {
+ if (console != NULL) console->Close();
if (i::FLAG_dump_counters) {
int number_of_counters = 0;
for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1402,9 +1354,9 @@ static FILE* FOpen(const char* path, const char* mode) {
}
-static char* ReadChars(Isolate* isolate, const char* name, int* size_out) {
+static char* ReadChars(const char* name, int* size_out) {
// Release the V8 lock while reading files.
- v8::Unlocker unlocker(isolate);
+ v8::Unlocker unlocker(Isolate::GetCurrent());
FILE* file = FOpen(name, "rb");
if (file == NULL) return NULL;
@@ -1429,17 +1381,15 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
String::Utf8Value filename(args[0]);
int length;
if (*filename == NULL) {
- return Throw("Error loading file");
+ return ThrowException(String::New("Error loading file"));
}
- uint8_t* data = reinterpret_cast<uint8_t*>(
- ReadChars(args.GetIsolate(), *filename, &length));
+ uint8_t* data = reinterpret_cast<uint8_t*>(ReadChars(*filename, &length));
if (data == NULL) {
- return Throw("Error reading file");
+ return ThrowException(String::New("Error reading file"));
}
- Isolate* isolate = args.GetIsolate();
Handle<Object> buffer = Object::New();
- buffer->SetHiddenValue(Symbols::ArrayBufferMarkerPropName(isolate), True());
+ buffer->SetHiddenValue(String::New(kArrayBufferMarkerPropName), True());
Persistent<Object> persistent_buffer = Persistent<Object>::New(buffer);
persistent_buffer.MakeWeak(data, ExternalArrayWeakCallback);
persistent_buffer.MarkIndependent();
@@ -1447,7 +1397,7 @@ Handle<Value> Shell::ReadBuffer(const Arguments& args) {
buffer->SetIndexedPropertiesToExternalArrayData(
data, kExternalUnsignedByteArray, length);
- buffer->Set(Symbols::byteLength(isolate),
+ buffer->Set(String::New("byteLength"),
Int32::New(static_cast<int32_t>(length)), ReadOnly);
return buffer;
}
@@ -1477,9 +1427,9 @@ static char* ReadWord(char* data) {
// Reads a file into a v8 string.
-Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
+Handle<String> Shell::ReadFile(const char* name) {
int size = 0;
- char* chars = ReadChars(isolate, name, &size);
+ char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars);
delete[] chars;
@@ -1487,13 +1437,12 @@ Handle<String> Shell::ReadFile(Isolate* isolate, const char* name) {
}
-void Shell::RunShell(Isolate* isolate) {
+void Shell::RunShell() {
Locker locker;
Context::Scope context_scope(evaluation_context_);
HandleScope outer_scope;
Handle<String> name = String::New("(d8)");
- DumbLineEditor dumb_line_editor(isolate);
- LineEditor* console = LineEditor::Get();
+ console = LineEditor::Get();
printf("V8 version %s [console: %s]\n", V8::GetVersion(), console->name());
console->Open();
while (true) {
@@ -1503,7 +1452,6 @@ void Shell::RunShell(Isolate* isolate) {
ExecuteString(input, name, true, true);
}
printf("\n");
- console->Close();
}
@@ -1511,9 +1459,9 @@ void Shell::RunShell(Isolate* isolate) {
class ShellThread : public i::Thread {
public:
// Takes ownership of the underlying char array of |files|.
- ShellThread(Isolate* isolate, char* files)
+ ShellThread(int no, char* files)
: Thread("d8:ShellThread"),
- isolate_(isolate), files_(files) { }
+ no_(no), files_(files) { }
~ShellThread() {
delete[] files_;
@@ -1521,7 +1469,7 @@ class ShellThread : public i::Thread {
virtual void Run();
private:
- Isolate* isolate_;
+ int no_;
char* files_;
};
@@ -1541,8 +1489,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker;
HandleScope outer_scope;
- Persistent<Context> thread_context =
- Shell::CreateEvaluationContext(isolate_);
+ Persistent<Context> thread_context = Shell::CreateEvaluationContext();
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@@ -1555,7 +1502,7 @@ void ShellThread::Run() {
continue;
}
- Handle<String> str = Shell::ReadFile(isolate_, filename);
+ Handle<String> str = Shell::ReadFile(filename);
if (str.IsEmpty()) {
printf("File '%s' not found\n", filename);
Shell::Exit(1);
@@ -1583,7 +1530,7 @@ SourceGroup::~SourceGroup() {
}
-void SourceGroup::Execute(Isolate* isolate) {
+void SourceGroup::Execute() {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
@@ -1601,7 +1548,7 @@ void SourceGroup::Execute(Isolate* isolate) {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = String::New(arg);
- Handle<String> source = ReadFile(isolate, arg);
+ Handle<String> source = ReadFile(arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
Shell::Exit(1);
@@ -1614,9 +1561,9 @@ void SourceGroup::Execute(Isolate* isolate) {
}
-Handle<String> SourceGroup::ReadFile(Isolate* isolate, const char* name) {
+Handle<String> SourceGroup::ReadFile(const char* name) {
int size;
- char* chars = ReadChars(isolate, name, &size);
+ char* chars = ReadChars(name, &size);
if (chars == NULL) return Handle<String>();
Handle<String> result = String::New(chars, size);
delete[] chars;
@@ -1642,11 +1589,10 @@ void SourceGroup::ExecuteInThread() {
Isolate::Scope iscope(isolate);
Locker lock(isolate);
HandleScope scope;
- Symbols symbols(isolate);
- Persistent<Context> context = Shell::CreateEvaluationContext(isolate);
+ Persistent<Context> context = Shell::CreateEvaluationContext();
{
Context::Scope cscope(context);
- Execute(isolate);
+ Execute();
}
context.Dispose();
if (Shell::options.send_idle_notification) {
@@ -1814,21 +1760,21 @@ bool Shell::SetOptions(int argc, char* argv[]) {
}
-int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
+int Shell::RunMain(int argc, char* argv[]) {
#ifndef V8_SHARED
i::List<i::Thread*> threads(1);
if (options.parallel_files != NULL) {
for (int i = 0; i < options.num_parallel_files; i++) {
char* files = NULL;
- { Locker lock(isolate);
+ { Locker lock(Isolate::GetCurrent());
int size = 0;
- files = ReadChars(isolate, options.parallel_files[i], &size);
+ files = ReadChars(options.parallel_files[i], &size);
}
if (files == NULL) {
printf("File list '%s' not found\n", options.parallel_files[i]);
Exit(1);
}
- ShellThread* thread = new ShellThread(isolate, files);
+ ShellThread* thread = new ShellThread(threads.length(), files);
thread->Start();
threads.Add(thread);
}
@@ -1840,7 +1786,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
{ // NOLINT
Locker lock;
HandleScope scope;
- Persistent<Context> context = CreateEvaluationContext(isolate);
+ Persistent<Context> context = CreateEvaluationContext();
if (options.last_run) {
// Keep using the same context in the interactive shell.
evaluation_context_ = context;
@@ -1854,7 +1800,7 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
}
{
Context::Scope cscope(context);
- options.isolate_sources[0].Execute(isolate);
+ options.isolate_sources[0].Execute();
}
if (!options.last_run) {
context.Dispose();
@@ -1896,62 +1842,59 @@ int Shell::RunMain(Isolate* isolate, int argc, char* argv[]) {
int Shell::Main(int argc, char* argv[]) {
if (!SetOptions(argc, argv)) return 1;
+ Initialize();
+
int result = 0;
- Isolate* isolate = Isolate::GetCurrent();
- {
- Initialize(isolate);
- Symbols symbols(isolate);
- InitializeDebugger(isolate);
-
- if (options.stress_opt || options.stress_deopt) {
- Testing::SetStressRunType(options.stress_opt
- ? Testing::kStressTypeOpt
- : Testing::kStressTypeDeopt);
- int stress_runs = Testing::GetStressRuns();
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
- Testing::PrepareStressRun(i);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
- }
- printf("======== Full Deoptimization =======\n");
- Testing::DeoptimizeAll();
+ if (options.stress_opt || options.stress_deopt) {
+ Testing::SetStressRunType(
+ options.stress_opt ? Testing::kStressTypeOpt
+ : Testing::kStressTypeDeopt);
+ int stress_runs = Testing::GetStressRuns();
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
+ Testing::PrepareStressRun(i);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(argc, argv);
+ }
+ printf("======== Full Deoptimization =======\n");
+ Testing::DeoptimizeAll();
#if !defined(V8_SHARED)
- } else if (i::FLAG_stress_runs > 0) {
- int stress_runs = i::FLAG_stress_runs;
- for (int i = 0; i < stress_runs && result == 0; i++) {
- printf("============ Run %d/%d ============\n", i + 1, stress_runs);
- options.last_run = (i == stress_runs - 1);
- result = RunMain(isolate, argc, argv);
- }
-#endif
- } else {
- result = RunMain(isolate, argc, argv);
+ } else if (i::FLAG_stress_runs > 0) {
+ int stress_runs = i::FLAG_stress_runs;
+ for (int i = 0; i < stress_runs && result == 0; i++) {
+ printf("============ Run %d/%d ============\n", i + 1, stress_runs);
+ options.last_run = (i == stress_runs - 1);
+ result = RunMain(argc, argv);
}
+#endif
+ } else {
+ result = RunMain(argc, argv);
+ }
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- // Run remote debugger if requested, but never on --test
- if (i::FLAG_remote_debugger && !options.test_shell) {
- InstallUtilityScript();
- RunRemoteDebugger(i::FLAG_debugger_port);
- return 0;
- }
+ // Run remote debugger if requested, but never on --test
+ if (i::FLAG_remote_debugger && !options.test_shell) {
+ InstallUtilityScript();
+ RunRemoteDebugger(i::FLAG_debugger_port);
+ return 0;
+ }
#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- // Run interactive shell if explicitly requested or if no script has been
- // executed, but never on --test
+ // Run interactive shell if explicitly requested or if no script has been
+ // executed, but never on --test
- if (( options.interactive_shell || !options.script_executed )
- && !options.test_shell ) {
+ if (( options.interactive_shell
+ || !options.script_executed )
+ && !options.test_shell ) {
#if !defined(V8_SHARED) && defined(ENABLE_DEBUGGER_SUPPORT)
- if (!i::FLAG_debugger) {
- InstallUtilityScript();
- }
-#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
- RunShell(isolate);
+ if (!i::FLAG_debugger) {
+ InstallUtilityScript();
}
+#endif // !V8_SHARED && ENABLE_DEBUGGER_SUPPORT
+ RunShell();
}
+
V8::Dispose();
#ifndef V8_SHARED
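
ReadChars (and ReadFromStdin) keep the pattern of dropping the VM lock around blocking I/O so other threads can enter V8 in the meantime; only the way the isolate is named changes. A sketch of that unlock-around-I/O shape, with a std::mutex standing in for v8::Locker/v8::Unlocker (precondition, as in d8: the caller already holds the lock):

    #include <cstdio>
    #include <mutex>

    std::mutex vm_mutex;  // stand-in for the per-isolate V8 lock

    size_t ReadWhileUnlocked(const char* path, char* buf, size_t cap) {
      vm_mutex.unlock();  // ~ v8::Unlocker: others may run JS during I/O
      std::FILE* file = std::fopen(path, "rb");
      size_t n = 0;
      if (file != NULL) {
        n = std::fread(buf, 1, cap, file);  // blocking read, lock released
        std::fclose(file);
      }
      vm_mutex.lock();    // lock is re-taken, as when the Unlocker dies
      return n;
    }
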
diff --git a/deps/v8/src/d8.h b/deps/v8/src/d8.h
index 161c6533e..a62a81fd9 100644
--- a/deps/v8/src/d8.h
+++ b/deps/v8/src/d8.h
@@ -158,7 +158,7 @@ class SourceGroup {
void End(int offset) { end_offset_ = offset; }
- void Execute(Isolate* isolate);
+ void Execute();
#ifndef V8_SHARED
void StartExecuteInThread();
@@ -187,7 +187,7 @@ class SourceGroup {
#endif // V8_SHARED
void ExitShell(int exit_code);
- Handle<String> ReadFile(Isolate* isolate, const char* name);
+ Handle<String> ReadFile(const char* name);
const char** argv_;
int begin_offset_;
@@ -272,9 +272,9 @@ class Shell : public i::AllStatic {
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
- static Handle<String> ReadFile(Isolate* isolate, const char* name);
- static Persistent<Context> CreateEvaluationContext(Isolate* isolate);
- static int RunMain(Isolate* isolate, int argc, char* argv[]);
+ static Handle<String> ReadFile(const char* name);
+ static Persistent<Context> CreateEvaluationContext();
+ static int RunMain(int argc, char* argv[]);
static int Main(int argc, char* argv[]);
static void Exit(int exit_code);
@@ -310,9 +310,9 @@ class Shell : public i::AllStatic {
static Handle<Value> DisableProfiler(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadBuffer(const Arguments& args);
- static Handle<String> ReadFromStdin(Isolate* isolate);
+ static Handle<String> ReadFromStdin();
static Handle<Value> ReadLine(const Arguments& args) {
- return ReadFromStdin(args.GetIsolate());
+ return ReadFromStdin();
}
static Handle<Value> Load(const Arguments& args);
static Handle<Value> ArrayBuffer(const Arguments& args);
@@ -365,6 +365,7 @@ class Shell : public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
+ static LineEditor* console;
static const char* kPrompt;
static ShellOptions options;
@@ -383,18 +384,15 @@ class Shell : public i::AllStatic {
static Counter* GetCounter(const char* name, bool is_histogram);
static void InstallUtilityScript();
#endif // V8_SHARED
- static void Initialize(Isolate* isolate);
- static void InitializeDebugger(Isolate* isolate);
- static void RunShell(Isolate* isolate);
+ static void Initialize();
+ static void RunShell();
static bool SetOptions(int argc, char* argv[]);
- static Handle<ObjectTemplate> CreateGlobalTemplate(Isolate* isolate);
+ static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<FunctionTemplate> CreateArrayBufferTemplate(InvocationCallback);
static Handle<FunctionTemplate> CreateArrayTemplate(InvocationCallback);
- static Handle<Value> CreateExternalArrayBuffer(Isolate* isolate,
- Handle<Object> buffer,
+ static Handle<Value> CreateExternalArrayBuffer(Handle<Object> buffer,
int32_t size);
- static Handle<Object> CreateExternalArray(Isolate* isolate,
- Handle<Object> array,
+ static Handle<Object> CreateExternalArray(Handle<Object> array,
Handle<Object> buffer,
ExternalArrayType type,
int32_t length,
diff --git a/deps/v8/src/date.js b/deps/v8/src/date.js
index c75d12c65..a54cb238c 100644
--- a/deps/v8/src/date.js
+++ b/deps/v8/src/date.js
@@ -107,7 +107,7 @@ function MakeDay(year, month, date) {
}
// Now we rely on year and month being SMIs.
- return %DateMakeDay(year | 0, month | 0) + date - 1;
+ return %DateMakeDay(year, month) + date - 1;
}
diff --git a/deps/v8/src/debug-debugger.js b/deps/v8/src/debug-debugger.js
index ea1a17d04..163a0bd82 100644
--- a/deps/v8/src/debug-debugger.js
+++ b/deps/v8/src/debug-debugger.js
@@ -1306,12 +1306,9 @@ ProtocolMessage.prototype.setOption = function(name, value) {
};
-ProtocolMessage.prototype.failed = function(message, opt_details) {
+ProtocolMessage.prototype.failed = function(message) {
this.success = false;
this.message = message;
- if (IS_OBJECT(opt_details)) {
- this.error_details = opt_details;
- }
};
@@ -1358,9 +1355,6 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.message) {
json.message = this.message;
}
- if (this.error_details) {
- json.error_details = this.error_details;
- }
json.running = this.running;
return JSON.stringify(json);
};
@@ -1433,8 +1427,6 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(
this.scopesRequest_(request, response);
} else if (request.command == 'scope') {
this.scopeRequest_(request, response);
- } else if (request.command == 'setVariableValue') {
- this.setVariableValueRequest_(request, response);
} else if (request.command == 'evaluate') {
this.evaluateRequest_(request, response);
} else if (lol_is_enabled && request.command == 'getobj') {
@@ -1961,12 +1953,11 @@ DebugCommandProcessor.prototype.frameRequest_ = function(request, response) {
};
-DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
- function(scope_description) {
+DebugCommandProcessor.prototype.frameForScopeRequest_ = function(request) {
// Get the frame for which the scope or scopes are requested.
// With no frameNumber argument use the currently selected frame.
- if (scope_description && !IS_UNDEFINED(scope_description.frameNumber)) {
- frame_index = scope_description.frameNumber;
+ if (request.arguments && !IS_UNDEFINED(request.arguments.frameNumber)) {
+ frame_index = request.arguments.frameNumber;
if (frame_index < 0 || this.exec_state_.frameCount() <= frame_index) {
throw new Error('Invalid frame number');
}
@@ -1980,13 +1971,13 @@ DebugCommandProcessor.prototype.resolveFrameFromScopeDescription_ =
// Gets scope host object from request. It is either a function
// ('functionHandle' argument must be specified) or a stack frame
// ('frameNumber' may be specified and the current frame is taken by default).
-DebugCommandProcessor.prototype.resolveScopeHolder_ =
- function(scope_description) {
- if (scope_description && "functionHandle" in scope_description) {
- if (!IS_NUMBER(scope_description.functionHandle)) {
+DebugCommandProcessor.prototype.scopeHolderForScopeRequest_ =
+ function(request) {
+ if (request.arguments && "functionHandle" in request.arguments) {
+ if (!IS_NUMBER(request.arguments.functionHandle)) {
throw new Error('Function handle must be a number');
}
- var function_mirror = LookupMirror(scope_description.functionHandle);
+ var function_mirror = LookupMirror(request.arguments.functionHandle);
if (!function_mirror) {
throw new Error('Failed to find function object by handle');
}
@@ -2001,14 +1992,14 @@ DebugCommandProcessor.prototype.resolveScopeHolder_ =
}
// Get the frame for which the scopes are requested.
- var frame = this.resolveFrameFromScopeDescription_(scope_description);
+ var frame = this.frameForScopeRequest_(request);
return frame;
}
}
DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
- var scope_holder = this.resolveScopeHolder_(request.arguments);
+ var scope_holder = this.scopeHolderForScopeRequest_(request);
// Fill all scopes for this frame or function.
var total_scopes = scope_holder.scopeCount();
@@ -2027,7 +2018,7 @@ DebugCommandProcessor.prototype.scopesRequest_ = function(request, response) {
DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
// Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(request.arguments);
+ var scope_holder = this.scopeHolderForScopeRequest_(request);
// With no scope argument just return top scope.
var scope_index = 0;
@@ -2042,77 +2033,6 @@ DebugCommandProcessor.prototype.scopeRequest_ = function(request, response) {
};
-// Reads value from protocol description. Description may be in form of type
-// (for singletons), raw value (primitive types supported in JSON),
-// string value description plus type (for primitive values) or handle id.
-// Returns raw value or throws exception.
-DebugCommandProcessor.resolveValue_ = function(value_description) {
- if ("handle" in value_description) {
- var value_mirror = LookupMirror(value_description.handle);
- if (!value_mirror) {
- throw new Error("Failed to resolve value by handle, ' #" +
- mapping.handle + "# not found");
- }
- return value_mirror.value();
- } else if ("stringDescription" in value_description) {
- if (value_description.type == BOOLEAN_TYPE) {
- return Boolean(value_description.stringDescription);
- } else if (value_description.type == NUMBER_TYPE) {
- return Number(value_description.stringDescription);
- } if (value_description.type == STRING_TYPE) {
- return String(value_description.stringDescription);
- } else {
- throw new Error("Unknown type");
- }
- } else if ("value" in value_description) {
- return value_description.value;
- } else if (value_description.type == UNDEFINED_TYPE) {
- return void 0;
- } else if (value_description.type == NULL_TYPE) {
- return null;
- } else {
- throw new Error("Failed to parse value description");
- }
-};
-
-
-DebugCommandProcessor.prototype.setVariableValueRequest_ =
- function(request, response) {
- if (!request.arguments) {
- response.failed('Missing arguments');
- return;
- }
-
- if (IS_UNDEFINED(request.arguments.name)) {
- response.failed('Missing variable name');
- }
- var variable_name = request.arguments.name;
-
- var scope_description = request.arguments.scope;
-
- // Get the frame or function for which the scope is requested.
- var scope_holder = this.resolveScopeHolder_(scope_description);
-
- if (IS_UNDEFINED(scope_description.number)) {
- response.failed('Missing scope number');
- }
- var scope_index = %ToNumber(scope_description.number);
-
- var scope = scope_holder.scope(scope_index);
-
- var new_value =
- DebugCommandProcessor.resolveValue_(request.arguments.newValue);
-
- scope.setVariableValue(variable_name, new_value);
-
- var new_value_mirror = MakeMirror(new_value);
-
- response.body = {
- newValue: new_value_mirror
- };
-};
-
-
DebugCommandProcessor.prototype.evaluateRequest_ = function(request, response) {
if (!request.arguments) {
return response.failed('Missing arguments');
@@ -2467,17 +2387,8 @@ DebugCommandProcessor.prototype.changeLiveRequest_ = function(
var new_source = request.arguments.new_source;
- var result_description;
- try {
- result_description = Debug.LiveEdit.SetScriptSource(the_script,
- new_source, preview_only, change_log);
- } catch (e) {
- if (e instanceof Debug.LiveEdit.Failure && "details" in e) {
- response.failed(e.message, e.details);
- return;
- }
- throw e;
- }
+ var result_description = Debug.LiveEdit.SetScriptSource(the_script,
+ new_source, preview_only, change_log);
response.body = {change_log: change_log, result: result_description};
if (!preview_only && !this.running_ && result_description.stack_modified) {
@@ -2752,7 +2663,3 @@ function ValueToProtocolValue_(value, mirror_serializer) {
}
return json;
}
-
-Debug.TestApi = {
- CommandProcessorResolveValue: DebugCommandProcessor.resolveValue_
-};
diff --git a/deps/v8/src/debug.cc b/deps/v8/src/debug.cc
index ea1c0842b..48c5519f7 100644
--- a/deps/v8/src/debug.cc
+++ b/deps/v8/src/debug.cc
@@ -261,12 +261,8 @@ void BreakLocationIterator::Reset() {
// Create relocation iterators for the two code objects.
if (reloc_iterator_ != NULL) delete reloc_iterator_;
if (reloc_iterator_original_ != NULL) delete reloc_iterator_original_;
- reloc_iterator_ = new RelocIterator(
- debug_info_->code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
- reloc_iterator_original_ = new RelocIterator(
- debug_info_->original_code(),
- ~RelocInfo::ModeMask(RelocInfo::CODE_AGE_SEQUENCE));
+ reloc_iterator_ = new RelocIterator(debug_info_->code());
+ reloc_iterator_original_ = new RelocIterator(debug_info_->original_code());
// Position at the first break point.
break_point_ = -1;
@@ -786,11 +782,9 @@ bool Debug::CompileDebuggerScript(int index) {
"error_loading_debugger", &computed_location,
Vector<Handle<Object> >::empty(), Handle<String>(), Handle<JSArray>());
ASSERT(!isolate->has_pending_exception());
- if (!exception.is_null()) {
- isolate->set_pending_exception(*exception);
- MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
- isolate->clear_pending_exception();
- }
+ isolate->set_pending_exception(*exception);
+ MessageHandler::ReportMessage(Isolate::Current(), NULL, message);
+ isolate->clear_pending_exception();
return false;
}
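
The dropped RelocIterator argument was a bit mask of relocation modes to visit: ModeMask(mode) sets one bit, and ~ModeMask(mode) means "every mode except this one" (here: skip code-age sequences). The filter arithmetic, restated standalone with illustrative mode values:

    enum Mode { kCodeTarget = 0, kEmbeddedObject = 1, kCodeAgeSequence = 2 };

    inline int ModeMask(Mode mode) { return 1 << mode; }

    inline bool VisitMode(int mask, Mode mode) {
      return (mask & ModeMask(mode)) != 0;
    }
    // VisitMode(~ModeMask(kCodeAgeSequence), kCodeTarget)      -> true
    // VisitMode(~ModeMask(kCodeAgeSequence), kCodeAgeSequence) -> false
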
diff --git a/deps/v8/src/deoptimizer.cc b/deps/v8/src/deoptimizer.cc
index 48b1d3f59..ad893b3b4 100644
--- a/deps/v8/src/deoptimizer.cc
+++ b/deps/v8/src/deoptimizer.cc
@@ -41,11 +41,8 @@ namespace v8 {
namespace internal {
DeoptimizerData::DeoptimizerData() {
- eager_deoptimization_entry_code_entries_ = -1;
- lazy_deoptimization_entry_code_entries_ = -1;
- size_t deopt_table_size = Deoptimizer::GetMaxDeoptTableSize();
- eager_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
- lazy_deoptimization_entry_code_ = new VirtualMemory(deopt_table_size);
+ eager_deoptimization_entry_code_ = NULL;
+ lazy_deoptimization_entry_code_ = NULL;
current_ = NULL;
deoptimizing_code_list_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -55,18 +52,16 @@ DeoptimizerData::DeoptimizerData() {
DeoptimizerData::~DeoptimizerData() {
- delete eager_deoptimization_entry_code_;
- eager_deoptimization_entry_code_ = NULL;
- delete lazy_deoptimization_entry_code_;
- lazy_deoptimization_entry_code_ = NULL;
-
- DeoptimizingCodeListNode* current = deoptimizing_code_list_;
- while (current != NULL) {
- DeoptimizingCodeListNode* prev = current;
- current = current->next();
- delete prev;
+ if (eager_deoptimization_entry_code_ != NULL) {
+ Isolate::Current()->memory_allocator()->Free(
+ eager_deoptimization_entry_code_);
+ eager_deoptimization_entry_code_ = NULL;
+ }
+ if (lazy_deoptimization_entry_code_ != NULL) {
+ Isolate::Current()->memory_allocator()->Free(
+ lazy_deoptimization_entry_code_);
+ lazy_deoptimization_entry_code_ = NULL;
}
- deoptimizing_code_list_ = NULL;
}
@@ -101,20 +96,6 @@ Deoptimizer* Deoptimizer::New(JSFunction* function,
}
-// No larger than 2K on all platforms
-static const int kDeoptTableMaxEpilogueCodeSize = 2 * KB;
-
-
-size_t Deoptimizer::GetMaxDeoptTableSize() {
- int entries_size =
- Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
- int commit_page_size = static_cast<int>(OS::CommitPageSize());
- int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
- commit_page_size) + 1;
- return static_cast<size_t>(commit_page_size * page_count);
-}
-
-
Deoptimizer* Deoptimizer::Grab(Isolate* isolate) {
ASSERT(isolate == Isolate::Current());
Deoptimizer* result = isolate->deoptimizer_data()->current_;
@@ -473,45 +454,44 @@ void Deoptimizer::DeleteFrameDescriptions() {
}
-Address Deoptimizer::GetDeoptimizationEntry(int id,
- BailoutType type,
- GetEntryMode mode) {
+Address Deoptimizer::GetDeoptimizationEntry(int id, BailoutType type) {
ASSERT(id >= 0);
- if (id >= kMaxNumberOfEntries) return NULL;
- VirtualMemory* base = NULL;
- if (mode == ENSURE_ENTRY_CODE) {
- EnsureCodeForDeoptimizationEntry(type, id);
- } else {
- ASSERT(mode == CALCULATE_ENTRY_ADDRESS);
- }
+ if (id >= kNumberOfEntries) return NULL;
+ MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
+ if (data->eager_deoptimization_entry_code_ == NULL) {
+ data->eager_deoptimization_entry_code_ = CreateCode(type);
+ }
base = data->eager_deoptimization_entry_code_;
} else {
+ if (data->lazy_deoptimization_entry_code_ == NULL) {
+ data->lazy_deoptimization_entry_code_ = CreateCode(type);
+ }
base = data->lazy_deoptimization_entry_code_;
}
return
- static_cast<Address>(base->address()) + (id * table_entry_size_);
+ static_cast<Address>(base->area_start()) + (id * table_entry_size_);
}
int Deoptimizer::GetDeoptimizationId(Address addr, BailoutType type) {
- VirtualMemory* base = NULL;
+ MemoryChunk* base = NULL;
DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
if (type == EAGER) {
base = data->eager_deoptimization_entry_code_;
} else {
base = data->lazy_deoptimization_entry_code_;
}
- Address base_casted = reinterpret_cast<Address>(base->address());
if (base == NULL ||
- addr < base->address() ||
- addr >= base_casted + (kMaxNumberOfEntries * table_entry_size_)) {
+ addr < base->area_start() ||
+ addr >= base->area_start() +
+ (kNumberOfEntries * table_entry_size_)) {
return kNotDeoptimizationEntry;
}
ASSERT_EQ(0,
- static_cast<int>(addr - base_casted) % table_entry_size_);
- return static_cast<int>(addr - base_casted) / table_entry_size_;
+ static_cast<int>(addr - base->area_start()) % table_entry_size_);
+ return static_cast<int>(addr - base->area_start()) / table_entry_size_;
}
@@ -535,7 +515,7 @@ int Deoptimizer::GetOutputInfo(DeoptimizationOutputData* data,
shared->SourceCodePrint(&stream, -1);
PrintF("[source:\n%s\n]", *stream.ToCString());
- FATAL("unable to find pc offset during deoptimization");
+ UNREACHABLE();
return -1;
}
@@ -1397,45 +1377,31 @@ void Deoptimizer::AddDoubleValue(intptr_t slot_address, double value) {
}
-void Deoptimizer::EnsureCodeForDeoptimizationEntry(BailoutType type,
- int max_entry_id) {
+MemoryChunk* Deoptimizer::CreateCode(BailoutType type) {
// We cannot run this if the serializer is enabled because this will
// cause us to emit relocation information for the external
// references. This is fine because the deoptimizer's code section
// isn't meant to be serialized at all.
ASSERT(!Serializer::enabled());
- ASSERT(type == EAGER || type == LAZY);
- DeoptimizerData* data = Isolate::Current()->deoptimizer_data();
- int entry_count = (type == EAGER)
- ? data->eager_deoptimization_entry_code_entries_
- : data->lazy_deoptimization_entry_code_entries_;
- if (max_entry_id < entry_count) return;
- entry_count = Max(entry_count, Deoptimizer::kMinNumberOfEntries);
- while (max_entry_id >= entry_count) entry_count *= 2;
- ASSERT(entry_count <= Deoptimizer::kMaxNumberOfEntries);
-
MacroAssembler masm(Isolate::Current(), NULL, 16 * KB);
masm.set_emit_debug_code(false);
- GenerateDeoptimizationEntries(&masm, entry_count, type);
+ GenerateDeoptimizationEntries(&masm, kNumberOfEntries, type);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(desc.reloc_size == 0);
- VirtualMemory* memory = type == EAGER
- ? data->eager_deoptimization_entry_code_
- : data->lazy_deoptimization_entry_code_;
- size_t table_size = Deoptimizer::GetMaxDeoptTableSize();
- ASSERT(static_cast<int>(table_size) >= desc.instr_size);
- memory->Commit(memory->address(), table_size, true);
- memcpy(memory->address(), desc.buffer, desc.instr_size);
- CPU::FlushICache(memory->address(), desc.instr_size);
-
- if (type == EAGER) {
- data->eager_deoptimization_entry_code_entries_ = entry_count;
- } else {
- data->lazy_deoptimization_entry_code_entries_ = entry_count;
+ MemoryChunk* chunk =
+ Isolate::Current()->memory_allocator()->AllocateChunk(desc.instr_size,
+ EXECUTABLE,
+ NULL);
+  if (chunk == NULL) {
+    V8::FatalProcessOutOfMemory("Not enough memory for deoptimization table");
  }
+  ASSERT(chunk->area_size() >= desc.instr_size);
+ memcpy(chunk->area_start(), desc.buffer, desc.instr_size);
+ CPU::FlushICache(chunk->area_start(), desc.instr_size);
+ return chunk;
}
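
The reverted CreateCode trades 3.15's grow-on-demand table (EnsureCodeForDeoptimizationEntry doubled entry_count inside pre-reserved VirtualMemory) for a one-shot scheme: on first use per bailout type, code for all kNumberOfEntries entries is emitted into a single executable MemoryChunk and cached on DeoptimizerData until its destructor frees it. A rough sketch of that create-on-first-use shape, with hypothetical stand-in types:

    // Stand-ins only; the real chunk comes from the isolate's memory
    // allocator and is freed in ~DeoptimizerData().
    enum BailoutType { EAGER, LAZY };
    struct Chunk { /* executable area holding all table entries */ };

    static Chunk* entry_code[2] = {nullptr, nullptr};

    Chunk* GetEntryCode(BailoutType type) {
      Chunk*& slot = entry_code[type];          // one table per bailout type
      if (slot == nullptr) slot = new Chunk();  // stands in for CreateCode(type)
      return slot;
    }
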
diff --git a/deps/v8/src/deoptimizer.h b/deps/v8/src/deoptimizer.h
index 89955b38b..f67f986ba 100644
--- a/deps/v8/src/deoptimizer.h
+++ b/deps/v8/src/deoptimizer.h
@@ -100,10 +100,8 @@ class DeoptimizerData {
#endif
private:
- int eager_deoptimization_entry_code_entries_;
- int lazy_deoptimization_entry_code_entries_;
- VirtualMemory* eager_deoptimization_entry_code_;
- VirtualMemory* lazy_deoptimization_entry_code_;
+ MemoryChunk* eager_deoptimization_entry_code_;
+ MemoryChunk* lazy_deoptimization_entry_code_;
Deoptimizer* current_;
#ifdef ENABLE_DEBUGGER_SUPPORT
@@ -228,17 +226,7 @@ class Deoptimizer : public Malloced {
static void ComputeOutputFrames(Deoptimizer* deoptimizer);
-
- enum GetEntryMode {
- CALCULATE_ENTRY_ADDRESS,
- ENSURE_ENTRY_CODE
- };
-
-
- static Address GetDeoptimizationEntry(
- int id,
- BailoutType type,
- GetEntryMode mode = ENSURE_ENTRY_CODE);
+ static Address GetDeoptimizationEntry(int id, BailoutType type);
static int GetDeoptimizationId(Address addr, BailoutType type);
static int GetOutputInfo(DeoptimizationOutputData* data,
BailoutId node_id,
@@ -295,11 +283,8 @@ class Deoptimizer : public Malloced {
int ConvertJSFrameIndexToFrameIndex(int jsframe_index);
- static size_t GetMaxDeoptTableSize();
-
private:
- static const int kMinNumberOfEntries = 64;
- static const int kMaxNumberOfEntries = 16384;
+ static const int kNumberOfEntries = 16384;
Deoptimizer(Isolate* isolate,
JSFunction* function,
@@ -342,8 +327,7 @@ class Deoptimizer : public Malloced {
void AddArgumentsObjectValue(intptr_t value);
void AddDoubleValue(intptr_t slot_address, double value);
- static void EnsureCodeForDeoptimizationEntry(BailoutType type,
- int max_entry_id);
+ static MemoryChunk* CreateCode(BailoutType type);
static void GenerateDeoptimizationEntries(
MacroAssembler* masm, int count, BailoutType type);
@@ -537,6 +521,9 @@ class FrameDescription {
intptr_t context_;
StackFrame::Type type_;
Smi* state_;
+#ifdef DEBUG
+ Code::Kind kind_;
+#endif
// Continuation is the PC where the execution continues after
// deoptimizing.
diff --git a/deps/v8/src/elements-kind.cc b/deps/v8/src/elements-kind.cc
index 7b1651a95..655a23bf1 100644
--- a/deps/v8/src/elements-kind.cc
+++ b/deps/v8/src/elements-kind.cc
@@ -35,14 +35,9 @@ namespace v8 {
namespace internal {
-const char* ElementsKindToString(ElementsKind kind) {
- ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
- return accessor->name();
-}
-
-
void PrintElementsKind(FILE* out, ElementsKind kind) {
- PrintF(out, "%s", ElementsKindToString(kind));
+ ElementsAccessor* accessor = ElementsAccessor::ForKind(kind);
+ PrintF(out, "%s", accessor->name());
}
diff --git a/deps/v8/src/elements-kind.h b/deps/v8/src/elements-kind.h
index cb3bb9c9e..3be7711a3 100644
--- a/deps/v8/src/elements-kind.h
+++ b/deps/v8/src/elements-kind.h
@@ -77,7 +77,6 @@ const int kElementsKindCount = LAST_ELEMENTS_KIND - FIRST_ELEMENTS_KIND + 1;
const int kFastElementsKindCount = LAST_FAST_ELEMENTS_KIND -
FIRST_FAST_ELEMENTS_KIND + 1;
-const char* ElementsKindToString(ElementsKind kind);
void PrintElementsKind(FILE* out, ElementsKind kind);
ElementsKind GetInitialFastElementsKind();
@@ -110,13 +109,6 @@ inline bool IsFastDoubleElementsKind(ElementsKind kind) {
}
-inline bool IsDoubleOrFloatElementsKind(ElementsKind kind) {
- return IsFastDoubleElementsKind(kind) ||
- kind == EXTERNAL_DOUBLE_ELEMENTS ||
- kind == EXTERNAL_FLOAT_ELEMENTS;
-}
-
-
inline bool IsFastSmiOrObjectElementsKind(ElementsKind kind) {
return kind == FAST_SMI_ELEMENTS ||
kind == FAST_HOLEY_SMI_ELEMENTS ||
diff --git a/deps/v8/src/elements.cc b/deps/v8/src/elements.cc
index 8e1bf3ec8..6afbcc0ee 100644
--- a/deps/v8/src/elements.cc
+++ b/deps/v8/src/elements.cc
@@ -146,36 +146,33 @@ static Failure* ThrowArrayLengthRangeError(Heap* heap) {
}
-static void CopyObjectToObjectElements(FixedArrayBase* from_base,
- ElementsKind from_kind,
- uint32_t from_start,
- FixedArrayBase* to_base,
- ElementsKind to_kind,
- uint32_t to_start,
- int raw_copy_size) {
- ASSERT(to_base->map() != HEAP->fixed_cow_array_map());
- AssertNoAllocation no_allocation;
+void CopyObjectToObjectElements(FixedArray* from,
+ ElementsKind from_kind,
+ uint32_t from_start,
+ FixedArray* to,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int raw_copy_size) {
+ ASSERT(to->map() != HEAP->fixed_cow_array_map());
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
}
}
+#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
Address to_address = to->address() + FixedArray::kHeaderSize;
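
This hunk also swaps tail-initialization strategies: the removed 3.15 code proactively overwrote the uncopied tail with the hole (the variant in CopyDoubleToObjectElements below explains why: a later HeapNumber allocation can trigger an incremental-marking step over half-initialized memory), while the restored 3.14 code assumes callers pre-initialize the tail and merely asserts that in debug builds. Both strategies, sketched with stand-in types:

    #include <cassert>
    #include <cstddef>
    #include <vector>

    // Stand-ins: a real FixedArray holds tagged pointers, and the hole is a
    // distinguished heap object rather than a bool.
    struct Obj { bool is_hole; };
    using Array = std::vector<Obj>;

    // 3.15 shape: unconditionally fill the uncopied tail with holes.
    void FillTailWithHoles(Array& to, size_t start) {
      for (size_t i = start; i < to.size(); ++i) to[i] = Obj{true};
    }

    // 3.14 shape (restored here): assume the tail is already holes, and
    // verify that only in debug builds.
    void AssertTailIsHoles(const Array& to, size_t start) {
      for (size_t i = start; i < to.size(); ++i) assert(to[i].is_hole);
    }
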
@@ -196,34 +193,31 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
}
-static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
+static void CopyDictionaryToObjectElements(SeededNumberDictionary* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
- AssertNoAllocation no_allocation;
int copy_size = raw_copy_size;
Heap* heap = from->GetHeap();
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
+#ifdef DEBUG
+ // Fast object arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- int start = to_start + copy_size;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
}
}
+#endif
}
- ASSERT(to_base != from_base);
+ ASSERT(to != from);
ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
if (copy_size == 0) return;
- FixedArray* to = FixedArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -250,9 +244,9 @@ static void CopyDictionaryToObjectElements(FixedArrayBase* from_base,
MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
- FixedArrayBase* from_base,
+ FixedDoubleArray* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedArray* to,
ElementsKind to_kind,
uint32_t to_start,
int raw_copy_size) {
@@ -261,26 +255,21 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
+#ifdef DEBUG
+ // FAST_*_ELEMENTS arrays cannot be uninitialized. Ensure they are already
+ // marked with the hole.
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- // Also initialize the area that will be copied over since HeapNumber
- // allocation below can cause an incremental marking step, requiring all
-    // existing heap objects to be properly initialized.
- int start = to_start;
- int length = to_base->length() - start;
- if (length > 0) {
- Heap* heap = from_base->GetHeap();
- MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
- heap->the_hole_value(), length);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ ASSERT(to->get(i)->IsTheHole());
}
}
+#endif
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
- if (copy_size == 0) return from_base;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedArray* to = FixedArray::cast(to_base);
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
+ if (copy_size == 0) return from;
for (int i = 0; i < copy_size; ++i) {
if (IsFastSmiElementsKind(to_kind)) {
UNIMPLEMENTED();
@@ -309,28 +298,26 @@ MUST_USE_RESULT static MaybeObject* CopyDoubleToObjectElements(
}
-static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
+static void CopyDoubleToDoubleElements(FixedDoubleArray* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = Min(from_base->length() - from_start,
- to_base->length() - to_start);
+ copy_size = Min(from->length() - from_start,
+ to->length() - to_start);
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
to_address += kDoubleSize * to_start;
@@ -342,27 +329,25 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
}
-static void CopySmiToDoubleElements(FixedArrayBase* from_base,
+static void CopySmiToDoubleElements(FixedArray* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
+ copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
from_start < from_end; from_start++, to_start++) {
@@ -376,9 +361,9 @@ static void CopySmiToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
+static void CopyPackedSmiToDoubleElements(FixedArray* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedDoubleArray* to,
uint32_t to_start,
int packed_size,
int raw_copy_size) {
@@ -387,55 +372,52 @@ static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = packed_size - from_start;
+ copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- to_end = to_base->length();
- for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
- }
+ to_end = to->length();
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
} else {
to_end = to_start + static_cast<uint32_t>(copy_size);
}
- ASSERT(static_cast<int>(to_end) <= to_base->length());
+ ASSERT(static_cast<int>(to_end) <= to->length());
ASSERT(packed_size >= 0 && packed_size <= copy_size);
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
from_start < from_end; from_start++, to_start++) {
Object* smi = from->get(from_start);
ASSERT(!smi->IsTheHole());
to->set(to_start, Smi::cast(smi)->value());
}
+
+ while (to_start < to_end) {
+ to->set_the_hole(to_start++);
+ }
}
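
The restored loop above copies only the known-packed prefix and then backfills everything up to to_end with holes in a trailing while loop, instead of hole-filling inside the size computation. The same shape on plain vectors (the hole is modelled with a sentinel value; V8 really uses a special NaN bit pattern in FixedDoubleArray):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    void CopyPackedThenBackfill(const std::vector<int>& from, size_t from_start,
                                std::vector<double>& to, size_t to_start,
                                size_t packed_size, size_t to_end) {
      assert(to_end <= to.size());
      assert(from_start + packed_size <= from.size());
      for (size_t i = 0; i < packed_size; ++i)   // the packed prefix: no holes
        to[to_start + i] = static_cast<double>(from[from_start + i]);
      for (size_t i = to_start + packed_size; i < to_end; ++i)
        to[i] = -0.0;                            // stand-in for the hole
    }
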
-static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
+static void CopyObjectToDoubleElements(FixedArray* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
int copy_size = raw_copy_size;
if (raw_copy_size < 0) {
ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
- copy_size = from_base->length() - from_start;
+ copy_size = from->length() - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
}
}
}
- ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
- (copy_size + static_cast<int>(from_start)) <= from_base->length());
+ ASSERT((copy_size + static_cast<int>(to_start)) <= to->length() &&
+ (copy_size + static_cast<int>(from_start)) <= from->length());
if (copy_size == 0) return;
- FixedArray* from = FixedArray::cast(from_base);
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
Object* the_hole = from->GetHeap()->the_hole_value();
for (uint32_t from_end = from_start + copy_size;
from_start < from_end; from_start++, to_start++) {
@@ -449,25 +431,23 @@ static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
}
-static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
+static void CopyDictionaryToDoubleElements(SeededNumberDictionary* from,
uint32_t from_start,
- FixedArrayBase* to_base,
+ FixedDoubleArray* to,
uint32_t to_start,
int raw_copy_size) {
- SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
int copy_size = raw_copy_size;
if (copy_size < 0) {
ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
copy_size = from->max_number_key() + 1 - from_start;
if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
- for (int i = to_start + copy_size; i < to_base->length(); ++i) {
- FixedDoubleArray::cast(to_base)->set_the_hole(i);
+ for (int i = to_start + copy_size; i < to->length(); ++i) {
+ to->set_the_hole(i);
}
}
}
if (copy_size == 0) return;
- FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
uint32_t to_length = to->length();
if (to_start + copy_size > to_length) {
copy_size = to_length - to_start;
@@ -523,8 +503,8 @@ class ElementsAccessorBase : public ElementsAccessor {
Map* map = fixed_array_base->map();
// Arrays that have been shifted in place can't be verified.
Heap* heap = holder->GetHeap();
- if (map == heap->one_pointer_filler_map() ||
- map == heap->two_pointer_filler_map() ||
+ if (map == heap->raw_unchecked_one_pointer_filler_map() ||
+ map == heap->raw_unchecked_two_pointer_filler_map() ||
map == heap->free_space_map()) {
return;
}
@@ -547,9 +527,10 @@ class ElementsAccessorBase : public ElementsAccessor {
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store) != ABSENT;
+ BackingStore* backing_store) {
+ MaybeObject* element =
+ ElementsAccessorSubclass::GetImpl(receiver, holder, key, backing_store);
+ return !element->IsTheHole();
}
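
With GetAttributesImpl and friends deleted, the base-class presence test above reduces to: an element exists iff reading its slot yields something other than the hole. That semantics in miniature (the hole representation is a stand-in):

    #include <cstddef>
    #include <vector>

    struct Slot { bool is_hole; };

    bool HasElement(const std::vector<Slot>& store, size_t key) {
      // Past the capacity, GetImpl would hand back the hole as well.
      if (key >= store.size()) return false;
      return !store[key].is_hole;
    }
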
virtual bool HasElement(Object* receiver,
@@ -560,7 +541,7 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, backing_store);
+ receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT virtual MaybeObject* Get(Object* receiver,
@@ -571,95 +552,28 @@ class ElementsAccessorBase : public ElementsAccessor {
backing_store = holder->elements();
}
return ElementsAccessorSubclass::GetImpl(
- receiver, holder, key, backing_store);
+ receiver, holder, key, BackingStore::cast(backing_store));
}
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArrayBase* backing_store) {
+ BackingStore* backing_store) {
return (key < ElementsAccessorSubclass::GetCapacityImpl(backing_store))
- ? BackingStore::cast(backing_store)->get(key)
+ ? backing_store->get(key)
: backing_store->GetHeap()->the_hole_value();
}
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetAttributesImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
- return ABSENT;
- }
- return BackingStore::cast(backing_store)->is_the_hole(key) ? ABSENT : NONE;
- }
-
- MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetTypeImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
- return NONEXISTENT;
- }
- return BackingStore::cast(backing_store)->is_the_hole(key)
- ? NONEXISTENT : FIELD;
- }
-
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store) {
- if (backing_store == NULL) {
- backing_store = holder->elements();
- }
- return ElementsAccessorSubclass::GetAccessorPairImpl(
- receiver, holder, key, backing_store);
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return NULL;
- }
-
MUST_USE_RESULT virtual MaybeObject* SetLength(JSArray* array,
Object* length) {
return ElementsAccessorSubclass::SetLengthImpl(
- array, length, array->elements());
+ array, length, BackingStore::cast(array->elements()));
}
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- FixedArrayBase* backing_store);
+ BackingStore* backing_store);
MUST_USE_RESULT virtual MaybeObject* SetCapacityAndLength(
JSArray* array,
@@ -717,6 +631,9 @@ class ElementsAccessorBase : public ElementsAccessor {
}
}
}
+ if (from->length() == 0) {
+ return from;
+ }
return ElementsAccessorSubclass::CopyElementsImpl(
from, from_start, to, to_kind, to_start, packed_size, copy_size);
}
@@ -737,22 +654,25 @@ class ElementsAccessorBase : public ElementsAccessor {
if (from == NULL) {
from = holder->elements();
}
+ BackingStore* backing_store = BackingStore::cast(from);
+ uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(backing_store);
// Optimize if 'other' is empty.
// We cannot optimize if 'this' is empty, as other may have holes.
- uint32_t len1 = ElementsAccessorSubclass::GetCapacityImpl(from);
if (len1 == 0) return to;
// Compute how many elements are not in other.
uint32_t extra = 0;
for (uint32_t y = 0; y < len1; y++) {
- uint32_t key = ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
+ uint32_t key =
+ ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
+ receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
+ ElementsAccessorSubclass::GetImpl(receiver, holder,
+ key, backing_store);
Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
+ if (!maybe_value->ToObject(&value)) return maybe_value;
ASSERT(!value->IsTheHole());
if (!HasKey(to, value)) {
extra++;
@@ -764,8 +684,9 @@ class ElementsAccessorBase : public ElementsAccessor {
// Allocate the result
FixedArray* result;
- MaybeObject* maybe_obj = from->GetHeap()->AllocateFixedArray(len0 + extra);
- if (!maybe_obj->To(&result)) return maybe_obj;
+ MaybeObject* maybe_obj =
+ backing_store->GetHeap()->AllocateFixedArray(len0 + extra);
+ if (!maybe_obj->To<FixedArray>(&result)) return maybe_obj;
// Fill in the content
{
@@ -781,13 +702,14 @@ class ElementsAccessorBase : public ElementsAccessor {
uint32_t index = 0;
for (uint32_t y = 0; y < len1; y++) {
uint32_t key =
- ElementsAccessorSubclass::GetKeyForIndexImpl(from, y);
+ ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, y);
if (ElementsAccessorSubclass::HasElementImpl(
- receiver, holder, key, from)) {
+ receiver, holder, key, backing_store)) {
MaybeObject* maybe_value =
- ElementsAccessorSubclass::GetImpl(receiver, holder, key, from);
+ ElementsAccessorSubclass::GetImpl(receiver, holder,
+ key, backing_store);
Object* value;
- if (!maybe_value->To(&value)) return maybe_value;
+ if (!maybe_value->ToObject(&value)) return maybe_value;
if (!value->IsTheHole() && !HasKey(to, value)) {
result->set(len0 + index, value);
index++;
@@ -799,22 +721,24 @@ class ElementsAccessorBase : public ElementsAccessor {
}
protected:
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
+ static uint32_t GetCapacityImpl(BackingStore* backing_store) {
return backing_store->length();
}
virtual uint32_t GetCapacity(FixedArrayBase* backing_store) {
- return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
+ return ElementsAccessorSubclass::GetCapacityImpl(
+ BackingStore::cast(backing_store));
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* backing_store,
+ static uint32_t GetKeyForIndexImpl(BackingStore* backing_store,
uint32_t index) {
return index;
}
virtual uint32_t GetKeyForIndex(FixedArrayBase* backing_store,
uint32_t index) {
- return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
+ return ElementsAccessorSubclass::GetKeyForIndexImpl(
+ BackingStore::cast(backing_store), index);
}
private:
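
Much of this revert is mechanical fallout of the class design visible above: ElementsAccessorBase is a CRTP base whose virtual entry points downcast the generic FixedArrayBase to the subclass's typed BackingStore exactly once, then forward to static *Impl methods that enjoy the precise type. A minimal sketch of that dispatch pattern (stand-in types, not V8's):

    #include <cstdint>

    struct FixedArrayBase { uint32_t length_; };

    struct TypedStore : FixedArrayBase {
      static TypedStore* cast(FixedArrayBase* base) {
        return static_cast<TypedStore*>(base);  // the real cast() also checks
      }
    };

    template <class Subclass, class BackingStore>
    struct AccessorBase {
      virtual ~AccessorBase() {}
      // Virtual wrapper: cast once, then dispatch statically to the subclass.
      virtual uint32_t GetCapacity(FixedArrayBase* store) {
        return Subclass::GetCapacityImpl(BackingStore::cast(store));
      }
      // Default typed implementation; subclasses may shadow it.
      static uint32_t GetCapacityImpl(BackingStore* store) {
        return store->length_;
      }
    };

    struct TypedAccessor : AccessorBase<TypedAccessor, TypedStore> {};

    uint32_t CapacityOf(FixedArrayBase* store) {
      TypedAccessor accessor;
      return accessor.GetCapacity(store);
    }
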
@@ -840,17 +764,17 @@ class FastElementsAccessor
// Adjusts the length of the fast backing store or returns the new length or
// undefined in case conversion to a slow backing store should be performed.
- static MaybeObject* SetLengthWithoutNormalize(FixedArrayBase* backing_store,
+ static MaybeObject* SetLengthWithoutNormalize(BackingStore* backing_store,
JSArray* array,
Object* length_object,
uint32_t length) {
uint32_t old_capacity = backing_store->length();
Object* old_length = array->length();
- bool same_or_smaller_size = old_length->IsSmi() &&
- static_cast<uint32_t>(Smi::cast(old_length)->value()) >= length;
+ bool same_size = old_length->IsSmi() &&
+ static_cast<uint32_t>(Smi::cast(old_length)->value()) == length;
ElementsKind kind = array->GetElementsKind();
- if (!same_or_smaller_size && IsFastElementsKind(kind) &&
+ if (!same_size && IsFastElementsKind(kind) &&
!IsFastHoleyElementsKind(kind)) {
kind = GetHoleyElementsKind(kind);
MaybeObject* maybe_obj = array->TransitionElementsKind(kind);
@@ -878,7 +802,7 @@ class FastElementsAccessor
// Otherwise, fill the unused tail with holes.
int old_length = FastD2IChecked(array->length()->Number());
for (int i = length; i < old_length; i++) {
- BackingStore::cast(backing_store)->set_the_hole(i);
+ backing_store->set_the_hole(i);
}
}
return length_object;
@@ -905,38 +829,32 @@ class FastElementsAccessor
ASSERT(obj->HasFastSmiOrObjectElements() ||
obj->HasFastDoubleElements() ||
obj->HasFastArgumentsElements());
- Heap* heap = obj->GetHeap();
- Object* elements = obj->elements();
- if (elements == heap->empty_fixed_array()) {
- return heap->true_value();
- }
typename KindTraits::BackingStore* backing_store =
- KindTraits::BackingStore::cast(elements);
- bool is_non_strict_arguments_elements_map =
- backing_store->map() == heap->non_strict_arguments_elements_map();
- if (is_non_strict_arguments_elements_map) {
- backing_store = KindTraits::BackingStore::cast(
- FixedArray::cast(backing_store)->get(1));
+ KindTraits::BackingStore::cast(obj->elements());
+ Heap* heap = obj->GetHeap();
+ if (backing_store->map() == heap->non_strict_arguments_elements_map()) {
+ backing_store =
+ KindTraits::BackingStore::cast(
+ FixedArray::cast(backing_store)->get(1));
+ } else {
+ ElementsKind kind = KindTraits::Kind;
+ if (IsFastPackedElementsKind(kind)) {
+ MaybeObject* transitioned =
+ obj->TransitionElementsKind(GetHoleyElementsKind(kind));
+ if (transitioned->IsFailure()) return transitioned;
+ }
+ if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
+ Object* writable;
+ MaybeObject* maybe = obj->EnsureWritableFastElements();
+ if (!maybe->ToObject(&writable)) return maybe;
+ backing_store = KindTraits::BackingStore::cast(writable);
+ }
}
uint32_t length = static_cast<uint32_t>(
obj->IsJSArray()
? Smi::cast(JSArray::cast(obj)->length())->value()
: backing_store->length());
if (key < length) {
- if (!is_non_strict_arguments_elements_map) {
- ElementsKind kind = KindTraits::Kind;
- if (IsFastPackedElementsKind(kind)) {
- MaybeObject* transitioned =
- obj->TransitionElementsKind(GetHoleyElementsKind(kind));
- if (transitioned->IsFailure()) return transitioned;
- }
- if (IsFastSmiOrObjectElementsKind(KindTraits::Kind)) {
- Object* writable;
- MaybeObject* maybe = obj->EnsureWritableFastElements();
- if (!maybe->ToObject(&writable)) return maybe;
- backing_store = KindTraits::BackingStore::cast(writable);
- }
- }
backing_store->set_the_hole(key);
// If an old space backing store is larger than a certain size and
// has too few used values, normalize it.
@@ -972,11 +890,11 @@ class FastElementsAccessor
Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ typename KindTraits::BackingStore* backing_store) {
if (key >= static_cast<uint32_t>(backing_store->length())) {
return false;
}
- return !BackingStore::cast(backing_store)->is_the_hole(key);
+ return !backing_store->is_the_hole(key);
}
static void ValidateContents(JSObject* holder, int length) {
@@ -1024,18 +942,25 @@ class FastSmiOrObjectElementsAccessor
int copy_size) {
if (IsFastSmiOrObjectElementsKind(to_kind)) {
CopyObjectToObjectElements(
- from, KindTraits::Kind, from_start, to, to_kind, to_start, copy_size);
+ FixedArray::cast(from), KindTraits::Kind, from_start,
+ FixedArray::cast(to), to_kind, to_start, copy_size);
} else if (IsFastDoubleElementsKind(to_kind)) {
if (IsFastSmiElementsKind(KindTraits::Kind)) {
if (IsFastPackedElementsKind(KindTraits::Kind) &&
packed_size != kPackedSizeNotKnown) {
CopyPackedSmiToDoubleElements(
- from, from_start, to, to_start, packed_size, copy_size);
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start,
+ packed_size, copy_size);
} else {
- CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+ CopySmiToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
}
} else {
- CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+ CopyObjectToDoubleElements(
+ FixedArray::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
}
} else {
UNREACHABLE();
@@ -1139,10 +1064,13 @@ class FastDoubleElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
return CopyDoubleToObjectElements(
- from, from_start, to, to_kind, to_start, copy_size);
+ FixedDoubleArray::cast(from), from_start, FixedArray::cast(to),
+ to_kind, to_start, copy_size);
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
- CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
+ CopyDoubleToDoubleElements(FixedDoubleArray::cast(from), from_start,
+ FixedDoubleArray::cast(to),
+ to_start, copy_size);
return from;
default:
UNREACHABLE();
@@ -1201,37 +1129,17 @@ class ExternalElementsAccessor
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArrayBase* backing_store) {
+ BackingStore* backing_store) {
return
key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? BackingStore::cast(backing_store)->get(key)
+ ? backing_store->get(key)
: backing_store->GetHeap()->undefined_value();
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? NONE : ABSENT;
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- return
- key < ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store)
- ? FIELD : NONEXISTENT;
- }
-
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- FixedArrayBase* backing_store) {
+ BackingStore* backing_store) {
// External arrays do not support changing their length.
UNREACHABLE();
return obj;
@@ -1247,7 +1155,7 @@ class ExternalElementsAccessor
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
+ BackingStore* backing_store) {
uint32_t capacity =
ExternalElementsAccessorSubclass::GetCapacityImpl(backing_store);
return key < capacity;
@@ -1356,11 +1264,10 @@ class DictionaryElementsAccessor
// Adjusts the length of the dictionary backing store and returns the new
// length according to ES5 section 15.4.5.2 behavior.
MUST_USE_RESULT static MaybeObject* SetLengthWithoutNormalize(
- FixedArrayBase* store,
+ SeededNumberDictionary* dict,
JSArray* array,
Object* length_object,
uint32_t length) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Heap* heap = array->GetHeap();
int capacity = dict->Capacity();
uint32_t new_length = length;
@@ -1470,12 +1377,14 @@ class DictionaryElementsAccessor
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
CopyDictionaryToObjectElements(
- from, from_start, to, to_kind, to_start, copy_size);
+ SeededNumberDictionary::cast(from), from_start,
+ FixedArray::cast(to), to_kind, to_start, copy_size);
return from;
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
CopyDictionaryToDoubleElements(
- from, from_start, to, to_start, copy_size);
+ SeededNumberDictionary::cast(from), from_start,
+ FixedDoubleArray::cast(to), to_start, copy_size);
return from;
default:
UNREACHABLE();
@@ -1498,8 +1407,7 @@ class DictionaryElementsAccessor
Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
+ SeededNumberDictionary* backing_store) {
int entry = backing_store->FindEntry(key);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = backing_store->ValueAt(entry);
@@ -1516,59 +1424,16 @@ class DictionaryElementsAccessor
return obj->GetHeap()->the_hole_value();
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- SeededNumberDictionary* dictionary =
- SeededNumberDictionary::cast(backing_store);
- int entry = dictionary->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return dictionary->DetailsAt(entry).attributes();
- }
- return ABSENT;
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound) {
- return backing_store->DetailsAt(entry).type();
- }
- return NONEXISTENT;
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* store) {
- SeededNumberDictionary* backing_store = SeededNumberDictionary::cast(store);
- int entry = backing_store->FindEntry(key);
- if (entry != SeededNumberDictionary::kNotFound &&
- backing_store->DetailsAt(entry).type() == CALLBACKS &&
- backing_store->ValueAt(entry)->IsAccessorPair()) {
- return AccessorPair::cast(backing_store->ValueAt(entry));
- }
- return NULL;
- }
-
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* backing_store) {
- return SeededNumberDictionary::cast(backing_store)->FindEntry(key) !=
+ SeededNumberDictionary* backing_store) {
+ return backing_store->FindEntry(key) !=
SeededNumberDictionary::kNotFound;
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* store,
+ static uint32_t GetKeyForIndexImpl(SeededNumberDictionary* dict,
uint32_t index) {
- SeededNumberDictionary* dict = SeededNumberDictionary::cast(store);
Object* key = dict->KeyAt(index);
return Smi::cast(key)->value();
}
@@ -1591,8 +1456,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
MUST_USE_RESULT static MaybeObject* GetImpl(Object* receiver,
JSObject* obj,
uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
+ FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(obj, parameter_map, key);
if (!probe->IsTheHole()) {
Context* context = Context::cast(parameter_map->get(0));
@@ -1619,61 +1483,10 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
}
}
- MUST_USE_RESULT static PropertyAttributes GetAttributesImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return NONE;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetAttributes(
- receiver, obj, key, arguments);
- }
- }
-
- MUST_USE_RESULT static PropertyType GetTypeImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return FIELD;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetType(
- receiver, obj, key, arguments);
- }
- }
-
- MUST_USE_RESULT static AccessorPair* GetAccessorPairImpl(
- Object* receiver,
- JSObject* obj,
- uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
- Object* probe = GetParameterMapArg(obj, parameter_map, key);
- if (!probe->IsTheHole()) {
- return NULL;
- } else {
- // If not aliased, check the arguments.
- FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
- return ElementsAccessor::ForArray(arguments)->GetAccessorPair(
- receiver, obj, key, arguments);
- }
- }
-
MUST_USE_RESULT static MaybeObject* SetLengthImpl(
JSObject* obj,
Object* length,
- FixedArrayBase* parameter_map) {
+ FixedArray* parameter_map) {
// TODO(mstarzinger): This was never implemented but will be used once we
// correctly implement [[DefineOwnProperty]] on arrays.
UNIMPLEMENTED();
@@ -1712,20 +1525,19 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
int packed_size,
int copy_size) {
FixedArray* parameter_map = FixedArray::cast(from);
- FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return accessor->CopyElements(NULL, from_start, to, to_kind,
to_start, copy_size, arguments);
}
- static uint32_t GetCapacityImpl(FixedArrayBase* backing_store) {
- FixedArray* parameter_map = FixedArray::cast(backing_store);
+ static uint32_t GetCapacityImpl(FixedArray* parameter_map) {
FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
return Max(static_cast<uint32_t>(parameter_map->length() - 2),
ForArray(arguments)->GetCapacity(arguments));
}
- static uint32_t GetKeyForIndexImpl(FixedArrayBase* dict,
+ static uint32_t GetKeyForIndexImpl(FixedArray* dict,
uint32_t index) {
return index;
}
@@ -1733,14 +1545,12 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
static bool HasElementImpl(Object* receiver,
JSObject* holder,
uint32_t key,
- FixedArrayBase* parameters) {
- FixedArray* parameter_map = FixedArray::cast(parameters);
+ FixedArray* parameter_map) {
Object* probe = GetParameterMapArg(holder, parameter_map, key);
if (!probe->IsTheHole()) {
return true;
} else {
- FixedArrayBase* arguments =
- FixedArrayBase::cast(FixedArray::cast(parameter_map)->get(1));
+ FixedArrayBase* arguments = FixedArrayBase::cast(parameter_map->get(1));
ElementsAccessor* accessor = ElementsAccessor::ForArray(arguments);
return !accessor->Get(receiver, holder, key, arguments)->IsTheHole();
}
@@ -1753,7 +1563,7 @@ class NonStrictArgumentsElementsAccessor : public ElementsAccessorBase<
uint32_t length = holder->IsJSArray()
? Smi::cast(JSArray::cast(holder)->length())->value()
: parameter_map->length();
    return key < (length - 2)
? parameter_map->get(key + 2)
: parameter_map->GetHeap()->the_hole_value();
}
@@ -1820,7 +1630,7 @@ MUST_USE_RESULT MaybeObject* ElementsAccessorBase<ElementsAccessorSubclass,
ElementsKindTraits>::
SetLengthImpl(JSObject* obj,
Object* length,
- FixedArrayBase* backing_store) {
+ typename ElementsKindTraits::BackingStore* backing_store) {
JSArray* array = JSArray::cast(obj);
// Fast case: The new length fits into a Smi.
diff --git a/deps/v8/src/elements.h b/deps/v8/src/elements.h
index ffd6428ce..822fca50e 100644
--- a/deps/v8/src/elements.h
+++ b/deps/v8/src/elements.h
@@ -71,39 +71,6 @@ class ElementsAccessor {
uint32_t key,
FixedArrayBase* backing_store = NULL) = 0;
- // Returns an element's attributes, or ABSENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns an element's type, or NONEXISTENT if there is no such
- // element. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual PropertyType GetType(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
- // Returns an element's accessors, or NULL if the element does not exist or
- // is plain. This method doesn't iterate up the prototype chain. The caller
- // can optionally pass in the backing store to use for the check, which must
- // be compatible with the ElementsKind of the ElementsAccessor. If
- // backing_store is NULL, the holder->elements() is used as the backing store.
- MUST_USE_RESULT virtual AccessorPair* GetAccessorPair(
- Object* receiver,
- JSObject* holder,
- uint32_t key,
- FixedArrayBase* backing_store = NULL) = 0;
-
// Modifies the length data property as specified for JSArrays and resizes the
// underlying backing store accordingly. The method honors the semantics of
// changing array sizes as defined in EcmaScript 5.1 15.4.5.2, i.e. array that
@@ -197,6 +164,16 @@ class ElementsAccessor {
DISALLOW_COPY_AND_ASSIGN(ElementsAccessor);
};
+
+void CopyObjectToObjectElements(FixedArray* from_obj,
+ ElementsKind from_kind,
+ uint32_t from_start,
+ FixedArray* to_obj,
+ ElementsKind to_kind,
+ uint32_t to_start,
+ int copy_size);
+
+
} } // namespace v8::internal
#endif // V8_ELEMENTS_H_
diff --git a/deps/v8/src/execution.cc b/deps/v8/src/execution.cc
index e43ea65fc..89091ba42 100644
--- a/deps/v8/src/execution.cc
+++ b/deps/v8/src/execution.cc
@@ -211,9 +211,6 @@ Handle<Object> Execution::TryCall(Handle<JSFunction> func,
Isolate* isolate = Isolate::Current();
ASSERT(isolate->has_pending_exception());
ASSERT(isolate->external_caught_exception());
- if (isolate->is_out_of_memory() && !isolate->ignore_out_of_memory()) {
- V8::FatalProcessOutOfMemory("OOM during Execution::TryCall");
- }
if (isolate->pending_exception() ==
isolate->heap()->termination_exception()) {
result = isolate->factory()->termination_exception();
@@ -430,6 +427,25 @@ void StackGuard::TerminateExecution() {
}
+bool StackGuard::IsRuntimeProfilerTick() {
+ ExecutionAccess access(isolate_);
+ return (thread_local_.interrupt_flags_ & RUNTIME_PROFILER_TICK) != 0;
+}
+
+
+void StackGuard::RequestRuntimeProfilerTick() {
+ // Ignore calls if we're not optimizing or if we can't get the lock.
+ if (FLAG_opt && ExecutionAccess::TryLock(isolate_)) {
+ thread_local_.interrupt_flags_ |= RUNTIME_PROFILER_TICK;
+ if (thread_local_.postpone_interrupts_nesting_ == 0) {
+ thread_local_.jslimit_ = thread_local_.climit_ = kInterruptLimit;
+ isolate_->heap()->SetStackLimits();
+ }
+ ExecutionAccess::Unlock(isolate_);
+ }
+}
+
+
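
RequestRuntimeProfilerTick follows the usual StackGuard recipe: set a bit in interrupt_flags_ while holding the execution lock, then clobber jslimit_/climit_ with a sentinel so the very next stack check emitted in generated code fails and control enters the runtime, which tests and clears the bit. Roughly (field names kept, but the sentinel value and locking are simplified away):

    #include <cstdint>

    const uintptr_t kInterruptLimit = ~uintptr_t(0);  // fails every stack check

    struct Guard {
      uint32_t interrupt_flags_ = 0;
      uintptr_t jslimit_ = 0;

      void Request(uint32_t bit) {
        interrupt_flags_ |= bit;
        jslimit_ = kInterruptLimit;  // next prologue check traps into runtime
      }
      bool Pending(uint32_t bit) const { return (interrupt_flags_ & bit) != 0; }
      void Clear(uint32_t bit) { interrupt_flags_ &= ~bit; }
    };
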
void StackGuard::RequestCodeReadyEvent() {
ASSERT(FLAG_parallel_recompilation);
if (ExecutionAccess::TryLock(isolate_)) {
@@ -921,14 +937,18 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
}
stack_guard->Continue(CODE_READY);
}
- if (!stack_guard->IsTerminateExecution() &&
- !FLAG_manual_parallel_recompilation) {
+ if (!stack_guard->IsTerminateExecution()) {
isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
}
isolate->counters()->stack_interrupts()->Increment();
- isolate->counters()->runtime_profiler_ticks()->Increment();
- isolate->runtime_profiler()->OptimizeNow();
+ // If FLAG_count_based_interrupts, every interrupt is a profiler interrupt.
+ if (FLAG_count_based_interrupts ||
+ stack_guard->IsRuntimeProfilerTick()) {
+ isolate->counters()->runtime_profiler_ticks()->Increment();
+ stack_guard->Continue(RUNTIME_PROFILER_TICK);
+ isolate->runtime_profiler()->OptimizeNow();
+ }
#ifdef ENABLE_DEBUGGER_SUPPORT
if (stack_guard->IsDebugBreak() || stack_guard->IsDebugCommand()) {
DebugBreakHelper();
diff --git a/deps/v8/src/execution.h b/deps/v8/src/execution.h
index 448b8d68a..9f5d9ff2c 100644
--- a/deps/v8/src/execution.h
+++ b/deps/v8/src/execution.h
@@ -41,8 +41,9 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
- GC_REQUEST = 1 << 5,
- CODE_READY = 1 << 6
+ RUNTIME_PROFILER_TICK = 1 << 5,
+ GC_REQUEST = 1 << 6,
+ CODE_READY = 1 << 7
};
@@ -193,6 +194,8 @@ class StackGuard {
void Interrupt();
bool IsTerminateExecution();
void TerminateExecution();
+ bool IsRuntimeProfilerTick();
+ void RequestRuntimeProfilerTick();
bool IsCodeReadyEvent();
void RequestCodeReadyEvent();
#ifdef ENABLE_DEBUGGER_SUPPORT
diff --git a/deps/v8/src/extensions/externalize-string-extension.cc b/deps/v8/src/extensions/externalize-string-extension.cc
index a126a5a56..50d876136 100644
--- a/deps/v8/src/extensions/externalize-string-extension.cc
+++ b/deps/v8/src/extensions/externalize-string-extension.cc
@@ -93,7 +93,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::Externalize(
return v8::ThrowException(v8::String::New(
"externalizeString() can't externalize twice."));
}
- if (string->IsOneByteRepresentation() && !force_two_byte) {
+ if (string->IsAsciiRepresentation() && !force_two_byte) {
char* data = new char[string->length()];
String::WriteToFlat(*string, data, 0, string->length());
SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
@@ -127,8 +127,7 @@ v8::Handle<v8::Value> ExternalizeStringExtension::IsAscii(
return v8::ThrowException(v8::String::New(
"isAsciiString() requires a single string argument."));
}
- return
- Utils::OpenHandle(*args[0].As<v8::String>())->IsOneByteRepresentation() ?
+ return Utils::OpenHandle(*args[0].As<v8::String>())->IsAsciiRepresentation() ?
v8::True() : v8::False();
}
diff --git a/deps/v8/src/extensions/gc-extension.cc b/deps/v8/src/extensions/gc-extension.cc
index 813b9219b..f921552aa 100644
--- a/deps/v8/src/extensions/gc-extension.cc
+++ b/deps/v8/src/extensions/gc-extension.cc
@@ -40,11 +40,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
- if (args[0]->BooleanValue()) {
- HEAP->CollectGarbage(NEW_SPACE, "gc extension");
- } else {
- HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
- }
+ HEAP->CollectAllGarbage(Heap::kNoGCFlags, "gc extension");
return v8::Undefined();
}
diff --git a/deps/v8/src/factory.cc b/deps/v8/src/factory.cc
index 556f2b01b..a2bb9391e 100644
--- a/deps/v8/src/factory.cc
+++ b/deps/v8/src/factory.cc
@@ -178,7 +178,7 @@ Handle<String> Factory::LookupAsciiSymbol(Vector<const char> string) {
}
-Handle<String> Factory::LookupAsciiSymbol(Handle<SeqOneByteString> string,
+Handle<String> Factory::LookupAsciiSymbol(Handle<SeqAsciiString> string,
int from,
int length) {
CALL_HEAP_FUNCTION(isolate(),
@@ -200,7 +200,7 @@ Handle<String> Factory::NewStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateStringFromOneByte(string, pretenure),
+ isolate()->heap()->AllocateStringFromAscii(string, pretenure),
String);
}
@@ -222,12 +222,12 @@ Handle<String> Factory::NewStringFromTwoByte(Vector<const uc16> string,
}
-Handle<SeqOneByteString> Factory::NewRawOneByteString(int length,
+Handle<SeqAsciiString> Factory::NewRawAsciiString(int length,
PretenureFlag pretenure) {
CALL_HEAP_FUNCTION(
isolate(),
- isolate()->heap()->AllocateRawOneByteString(length, pretenure),
- SeqOneByteString);
+ isolate()->heap()->AllocateRawAsciiString(length, pretenure),
+ SeqAsciiString);
}
@@ -525,12 +525,6 @@ Handle<FixedArray> Factory::CopyFixedArray(Handle<FixedArray> array) {
}
-Handle<FixedArray> Factory::CopySizeFixedArray(Handle<FixedArray> array,
- int new_length) {
- CALL_HEAP_FUNCTION(isolate(), array->CopySize(new_length), FixedArray);
-}
-
-
Handle<FixedDoubleArray> Factory::CopyFixedDoubleArray(
Handle<FixedDoubleArray> array) {
CALL_HEAP_FUNCTION(isolate(), array->Copy(), FixedDoubleArray);
@@ -876,13 +870,6 @@ Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
}
-Handle<JSObject> Factory::NewExternal(void* value) {
- CALL_HEAP_FUNCTION(isolate(),
- isolate()->heap()->AllocateExternal(value),
- JSObject);
-}
-
-
Handle<Code> Factory::NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_ref,
@@ -950,9 +937,6 @@ Handle<JSObject> Factory::NewJSObjectFromMap(Handle<Map> map) {
Handle<JSArray> Factory::NewJSArray(int capacity,
ElementsKind elements_kind,
PretenureFlag pretenure) {
- if (capacity != 0) {
- elements_kind = GetHoleyElementsKind(elements_kind);
- }
CALL_HEAP_FUNCTION(isolate(),
isolate()->heap()->AllocateJSArrayAndStorage(
elements_kind,
@@ -971,7 +955,6 @@ Handle<JSArray> Factory::NewJSArrayWithElements(Handle<FixedArrayBase> elements,
isolate(),
isolate()->heap()->AllocateJSArrayWithElements(*elements,
elements_kind,
- elements->length(),
pretenure),
JSArray);
}
@@ -1370,7 +1353,7 @@ Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
// Check to see whether there is a matching element in the cache.
Handle<MapCache> cache =
Handle<MapCache>(MapCache::cast(context->map_cache()));
- Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
+ Handle<Object> result = Handle<Object>(cache->Lookup(*keys));
if (result->IsMap()) return Handle<Map>::cast(result);
// Create a new map and add it to the cache.
Handle<Map> map =
@@ -1422,7 +1405,7 @@ void Factory::ConfigureInstance(Handle<FunctionTemplateInfo> desc,
bool* pending_exception) {
// Configure the instance by adding the properties specified by the
// instance template.
- Handle<Object> instance_template(desc->instance_template(), isolate());
+ Handle<Object> instance_template = Handle<Object>(desc->instance_template());
if (!instance_template->IsUndefined()) {
Execution::ConfigureInstance(instance,
instance_template,
diff --git a/deps/v8/src/factory.h b/deps/v8/src/factory.h
index dd613b71e..51065aac4 100644
--- a/deps/v8/src/factory.h
+++ b/deps/v8/src/factory.h
@@ -82,7 +82,7 @@ class Factory {
Handle<String> LookupSymbol(Vector<const char> str);
Handle<String> LookupSymbol(Handle<String> str);
Handle<String> LookupAsciiSymbol(Vector<const char> str);
- Handle<String> LookupAsciiSymbol(Handle<SeqOneByteString>,
+ Handle<String> LookupAsciiSymbol(Handle<SeqAsciiString>,
int from,
int length);
Handle<String> LookupTwoByteSymbol(Vector<const uc16> str);
@@ -130,7 +130,7 @@ class Factory {
// Allocates and partially initializes an ASCII or TwoByte String. The
// characters of the string are uninitialized. Currently used in regexp code
// only, where they are pretenured.
- Handle<SeqOneByteString> NewRawOneByteString(
+ Handle<SeqAsciiString> NewRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
Handle<SeqTwoByteString> NewRawTwoByteString(
@@ -239,9 +239,6 @@ class Factory {
Handle<FixedArray> CopyFixedArray(Handle<FixedArray> array);
- Handle<FixedArray> CopySizeFixedArray(Handle<FixedArray> array,
- int new_length);
-
Handle<FixedDoubleArray> CopyFixedDoubleArray(
Handle<FixedDoubleArray> array);
@@ -328,8 +325,6 @@ class Factory {
Handle<ScopeInfo> NewScopeInfo(int length);
- Handle<JSObject> NewExternal(void* value);
-
Handle<Code> NewCode(const CodeDesc& desc,
Code::Flags flags,
Handle<Object> self_reference,
diff --git a/deps/v8/src/flag-definitions.h b/deps/v8/src/flag-definitions.h
index 338060f3f..4c7c090f4 100644
--- a/deps/v8/src/flag-definitions.h
+++ b/deps/v8/src/flag-definitions.h
@@ -144,16 +144,12 @@ DEFINE_bool(harmony_modules, false,
DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
DEFINE_bool(harmony_collections, false,
"enable harmony collections (sets, maps, and weak maps)")
-DEFINE_bool(harmony_observation, false,
- "enable harmony object observation (implies harmony collections")
DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
DEFINE_implication(harmony, harmony_scoping)
DEFINE_implication(harmony, harmony_modules)
DEFINE_implication(harmony, harmony_proxies)
DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_observation)
DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_observation, harmony_collections)
// Flags for experimental implementation features.
DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
@@ -181,7 +177,6 @@ DEFINE_int(max_inlined_nodes, 196,
DEFINE_int(max_inlined_nodes_cumulative, 196,
"maximum cumulative number of AST nodes considered for inlining")
DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
DEFINE_bool(collect_megamorphic_maps_from_stub_cache,
true,
"crankshaft harvests type feedback from stub cache")
@@ -226,7 +221,7 @@ DEFINE_int(loop_weight, 1, "loop weight for representation inference")
DEFINE_bool(optimize_for_in, true,
"optimize functions containing for-in loops")
DEFINE_bool(opt_safe_uint32_operations, true,
- "allow uint32 values on optimize frames if they are used only in "
+ "allow uint32 values on optimize frames if they are used only in"
"safe operations")
DEFINE_bool(parallel_recompilation, false,
@@ -234,9 +229,6 @@ DEFINE_bool(parallel_recompilation, false,
DEFINE_bool(trace_parallel_recompilation, false, "track parallel recompilation")
DEFINE_int(parallel_recompilation_queue_length, 2,
"the length of the parallel compilation queue")
-DEFINE_bool(manual_parallel_recompilation, false,
- "disable automatic optimization")
-DEFINE_implication(manual_parallel_recompilation, parallel_recompilation)
// Experimental profiler changes.
DEFINE_bool(experimental_profiler, true, "enable all profiler experiments")
@@ -247,6 +239,8 @@ DEFINE_bool(self_optimization, false,
DEFINE_bool(direct_self_opt, false,
"call recompile stub directly when self-optimizing")
DEFINE_bool(retry_self_opt, false, "re-try self-optimization if it failed")
+DEFINE_bool(count_based_interrupts, false,
+ "trigger profiler ticks based on counting instead of timing")
DEFINE_bool(interrupt_at_exit, false,
"insert an interrupt check at function exit")
DEFINE_bool(weighted_back_edges, false,
@@ -262,6 +256,7 @@ DEFINE_implication(experimental_profiler, watch_ic_patching)
DEFINE_implication(experimental_profiler, self_optimization)
// Not implying direct_self_opt here because it seems to be a bad idea.
DEFINE_implication(experimental_profiler, retry_self_opt)
+DEFINE_implication(experimental_profiler, count_based_interrupts)
DEFINE_implication(experimental_profiler, interrupt_at_exit)
DEFINE_implication(experimental_profiler, weighted_back_edges)
@@ -394,12 +389,7 @@ DEFINE_bool(trace_external_memory, false,
DEFINE_bool(collect_maps, true,
"garbage collect maps from which no objects can be reached")
DEFINE_bool(flush_code, true,
- "flush code that we expect not to use again (during full gc)")
-DEFINE_bool(flush_code_incrementally, true,
- "flush code that we expect not to use again (incrementally)")
-DEFINE_bool(age_code, true,
- "track un-executed functions to age code and flush only "
- "old code")
+ "flush code that we expect not to use again before full gc")
DEFINE_bool(incremental_marking, true, "use incremental marking")
DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
DEFINE_bool(trace_incremental_marking, false,
@@ -439,9 +429,6 @@ DEFINE_bool(incremental_code_compaction, true,
DEFINE_bool(cleanup_code_caches_at_gc, true,
"Flush inline caches prior to mark compact collection and "
"flush code caches in maps during mark compact cycle.")
-DEFINE_bool(use_marking_progress_bar, true,
- "Use a progress bar to scan large objects in increments when "
- "incremental marking is active.")
DEFINE_int(random_seed, 0,
"Default seed for initializing random generator "
"(0, the default, means to use system random).")
@@ -655,14 +642,12 @@ DEFINE_bool(prof_lazy, false,
DEFINE_bool(prof_browser_mode, true,
"Used with --prof, turns on browser-compatible mode for profiling.")
DEFINE_bool(log_regexp, false, "Log regular expression execution.")
+DEFINE_bool(sliding_state_window, false,
+ "Update sliding state window counters.")
DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
"Specify the name of the file for fake gc mmap used in ll_prof")
-DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
-DEFINE_bool(log_timer_events, false,
- "Time events including external callbacks.")
-DEFINE_implication(log_timer_events, log_internal_timer_events)
//
// Disassembler only flags
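
The DEFINE_implication lines above are what make umbrella flags such as --harmony and --experimental_profiler fan out. Conceptually (this paraphrases the macro machinery elsewhere in this same file; treat it as a sketch, not the literal expansion), each implication is applied after flag parsing:

    // For DEFINE_implication(whenflag, thenflag):
    if (FLAG_whenflag) FLAG_thenflag = true;
    // e.g. --experimental_profiler now also turns on
    // count_based_interrupts, interrupt_at_exit and weighted_back_edges.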
diff --git a/deps/v8/src/frames.cc b/deps/v8/src/frames.cc
index 3b60fb59f..18dc54164 100644
--- a/deps/v8/src/frames.cc
+++ b/deps/v8/src/frames.cc
@@ -484,7 +484,7 @@ Address StackFrame::UnpaddedFP() const {
Code* EntryFrame::unchecked_code() const {
- return HEAP->js_entry_code();
+ return HEAP->raw_unchecked_js_entry_code();
}
@@ -507,7 +507,7 @@ StackFrame::Type EntryFrame::GetCallerState(State* state) const {
Code* EntryConstructFrame::unchecked_code() const {
- return HEAP->js_construct_entry_code();
+ return HEAP->raw_unchecked_js_construct_entry_code();
}
diff --git a/deps/v8/src/full-codegen.cc b/deps/v8/src/full-codegen.cc
index 928da4a76..9592e0afa 100644
--- a/deps/v8/src/full-codegen.cc
+++ b/deps/v8/src/full-codegen.cc
@@ -86,10 +86,6 @@ void BreakableStatementChecker::VisitModuleUrl(ModuleUrl* module) {
}
-void BreakableStatementChecker::VisitModuleStatement(ModuleStatement* stmt) {
-}
-
-
void BreakableStatementChecker::VisitBlock(Block* stmt) {
}
@@ -470,8 +466,9 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
}
-void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
- // The pc offset does not need to be encoded and packed together with a state.
+void FullCodeGenerator::RecordStackCheck(BailoutId ast_id) {
+ // The pc offset does not need to be encoded and packed together with a
+ // state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry, zone());
@@ -585,137 +582,16 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
}
-void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
- ASSERT(scope_->is_global_scope());
-
- for (int i = 0; i < declarations->length(); i++) {
- ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
- if (declaration != NULL) {
- ModuleLiteral* module = declaration->module()->AsModuleLiteral();
- if (module != NULL) {
- Comment cmnt(masm_, "[ Link nested modules");
- Scope* scope = module->body()->scope();
- Interface* interface = scope->interface();
- ASSERT(interface->IsModule() && interface->IsFrozen());
-
- interface->Allocate(scope->module_var()->index());
-
- // Set up module context.
- ASSERT(scope->interface()->Index() >= 0);
- __ Push(Smi::FromInt(scope->interface()->Index()));
- __ Push(scope->GetScopeInfo());
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
-
- AllocateModules(scope->declarations());
-
- // Pop module context.
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- }
- }
-}
-
-
-// Modules have their own local scope, represented by their own context.
-// Module instance objects have an accessor for every export that forwards
-// access to the respective slot from the module's context. (Exports that are
-// modules themselves, however, are simple data properties.)
-//
-// All modules have a _hosting_ scope/context, which (currently) is the
-// (innermost) enclosing global scope. To deal with recursion, nested modules
-// are hosted by the same scope as global ones.
-//
-// For every (global or nested) module literal, the hosting context has an
-// internal slot that points directly to the respective module context. This
-// enables quick access to (statically resolved) module members by 2-dimensional
-// access through the hosting context. For example,
-//
-// module A {
-// let x;
-// module B { let y; }
-// }
-// module C { let z; }
-//
-// allocates contexts as follows:
-//
-// [header| .A | .B | .C | A | C ] (global)
-// | | |
-// | | +-- [header| z ] (module)
-// | |
-// | +------- [header| y ] (module)
-// |
-// +------------ [header| x | B ] (module)
-//
-// Here, .A, .B, .C are the internal slots pointing to the hosted module
-// contexts, whereas A, B, C hold the actual instance objects (note that every
-// module context also points to the respective instance object through its
-// extension slot in the header).
-//
-// To deal with arbitrary recursion and aliases between modules,
-// they are created and initialized in several stages. Each stage applies to
-// all modules in the hosting global scope, including nested ones.
-//
-// 1. Allocate: for each module _literal_, allocate the module contexts and
-// respective instance object and wire them up. This happens in the
-// PushModuleContext runtime function, as generated by AllocateModules
-// (invoked by VisitDeclarations in the hosting scope).
-//
-// 2. Bind: for each module _declaration_ (i.e. literals as well as aliases),
-// assign the respective instance object to respective local variables. This
-// happens in VisitModuleDeclaration, and uses the instance objects created
-// in the previous stage.
-// For each module _literal_, this phase also constructs a module descriptor
-// for the next stage. This happens in VisitModuleLiteral.
-//
-// 3. Populate: invoke the DeclareModules runtime function to populate each
-// _instance_ object with accessors for it exports. This is generated by
-// DeclareModules (invoked by VisitDeclarations in the hosting scope again),
-// and uses the descriptors generated in the previous stage.
-//
-// 4. Initialize: execute the module bodies (and other code) in sequence. This
-// happens by the separate statements generated for module bodies. To reenter
-// the module scopes properly, the parser inserted ModuleStatements.
-
void FullCodeGenerator::VisitDeclarations(
ZoneList<Declaration*>* declarations) {
- Handle<FixedArray> saved_modules = modules_;
- int saved_module_index = module_index_;
ZoneList<Handle<Object> >* saved_globals = globals_;
ZoneList<Handle<Object> > inner_globals(10, zone());
globals_ = &inner_globals;
- if (scope_->num_modules() != 0) {
- // This is a scope hosting modules. Allocate a descriptor array to pass
- // to the runtime for initialization.
- Comment cmnt(masm_, "[ Allocate modules");
- ASSERT(scope_->is_global_scope());
- modules_ =
- isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
- module_index_ = 0;
-
- // Generate code for allocating all modules, including nested ones.
- // The allocated contexts are stored in internal variables in this scope.
- AllocateModules(declarations);
- }
-
AstVisitor::VisitDeclarations(declarations);
-
- if (scope_->num_modules() != 0) {
- // Initialize modules from descriptor array.
- ASSERT(module_index_ == modules_->length());
- DeclareModules(modules_);
- modules_ = saved_modules;
- module_index_ = saved_module_index;
- }
-
if (!globals_->is_empty()) {
// Invoke the platform-dependent code generator to do the actual
- // declaration of the global functions and variables.
+ // declaration of the global functions and variables.
Handle<FixedArray> array =
isolate()->factory()->NewFixedArray(globals_->length(), TENURED);
for (int i = 0; i < globals_->length(); ++i)
@@ -728,23 +604,19 @@ void FullCodeGenerator::VisitDeclarations(
void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
+ // Allocate a module context statically.
Block* block = module->body();
Scope* saved_scope = scope();
scope_ = block->scope();
- Interface* interface = scope_->interface();
+ Interface* interface = module->interface();
+ Handle<JSModule> instance = interface->Instance();
Comment cmnt(masm_, "[ ModuleLiteral");
SetStatementPosition(block);
- ASSERT(!modules_.is_null());
- ASSERT(module_index_ < modules_->length());
- int index = module_index_++;
-
// Set up module context.
- ASSERT(interface->Index() >= 0);
- __ Push(Smi::FromInt(interface->Index()));
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
+ __ Push(instance);
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
{
@@ -752,11 +624,6 @@ void FullCodeGenerator::VisitModuleLiteral(ModuleLiteral* module) {
VisitDeclarations(scope_->declarations());
}
- // Populate the module description.
- Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
- modules_->set(index, *description);
-
scope_ = saved_scope;
// Pop module context.
LoadContextField(context_register(), Context::PREVIOUS_INDEX);
@@ -777,20 +644,8 @@ void FullCodeGenerator::VisitModulePath(ModulePath* module) {
}
-void FullCodeGenerator::VisitModuleUrl(ModuleUrl* module) {
- // TODO(rossberg): dummy allocation for now.
- Scope* scope = module->body()->scope();
- Interface* interface = scope_->interface();
-
- ASSERT(interface->IsModule() && interface->IsFrozen());
- ASSERT(!modules_.is_null());
- ASSERT(module_index_ < modules_->length());
- interface->Allocate(scope->module_var()->index());
- int index = module_index_++;
-
- Handle<ModuleInfo> description =
- ModuleInfo::Create(isolate(), interface, scope_);
- modules_->set(index, *description);
+void FullCodeGenerator::VisitModuleUrl(ModuleUrl* decl) {
+ // TODO(rossberg)
}
@@ -1049,28 +904,37 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
// Push a block context when entering a block with block scoped variables.
if (stmt->scope() != NULL) {
scope_ = stmt->scope();
- ASSERT(!scope_->is_module_scope());
- { Comment cmnt(masm_, "[ Extend block context");
- Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
- int heap_slots = scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
- __ Push(scope_info);
- PushFunctionArgumentForContextAllocation();
- if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
- FastNewBlockContextStub stub(heap_slots);
- __ CallStub(&stub);
- } else {
- __ CallRuntime(Runtime::kPushBlockContext, 2);
+ if (scope_->is_module_scope()) {
+ // If this block is a module body, then we have already allocated and
+ // initialized the declarations earlier. Just push the context.
+ ASSERT(!scope_->interface()->Instance().is_null());
+ __ Push(scope_->interface()->Instance());
+ __ CallRuntime(Runtime::kPushModuleContext, 1);
+ StoreToFrameField(
+ StandardFrameConstants::kContextOffset, context_register());
+ } else {
+ { Comment cmnt(masm_, "[ Extend block context");
+ Handle<ScopeInfo> scope_info = scope_->GetScopeInfo();
+ int heap_slots =
+ scope_info->ContextLength() - Context::MIN_CONTEXT_SLOTS;
+ __ Push(scope_info);
+ PushFunctionArgumentForContextAllocation();
+ if (heap_slots <= FastNewBlockContextStub::kMaximumSlots) {
+ FastNewBlockContextStub stub(heap_slots);
+ __ CallStub(&stub);
+ } else {
+ __ CallRuntime(Runtime::kPushBlockContext, 2);
+ }
+
+ // Replace the context stored in the frame.
+ StoreToFrameField(StandardFrameConstants::kContextOffset,
+ context_register());
+ }
+ { Comment cmnt(masm_, "[ Declarations");
+ VisitDeclarations(scope_->declarations());
}
-
- // Replace the context stored in the frame.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
- }
- { Comment cmnt(masm_, "[ Declarations");
- VisitDeclarations(scope_->declarations());
}
}
-
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
VisitStatements(stmt->statements());
scope_ = saved_scope;
@@ -1087,26 +951,6 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
}
-void FullCodeGenerator::VisitModuleStatement(ModuleStatement* stmt) {
- Comment cmnt(masm_, "[ Module context");
-
- __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
- __ Push(Smi::FromInt(0));
- __ CallRuntime(Runtime::kPushModuleContext, 2);
- StoreToFrameField(
- StandardFrameConstants::kContextOffset, context_register());
-
- Scope* saved_scope = scope_;
- scope_ = stmt->body()->scope();
- VisitStatements(stmt->body()->statements());
- scope_ = saved_scope;
- LoadContextField(context_register(), Context::PREVIOUS_INDEX);
- // Update local stack frame context field.
- StoreToFrameField(StandardFrameConstants::kContextOffset,
- context_register());
-}
-
-
void FullCodeGenerator::VisitExpressionStatement(ExpressionStatement* stmt) {
Comment cmnt(masm_, "[ ExpressionStatement");
SetStatementPosition(stmt);
@@ -1267,7 +1111,7 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
- EmitBackEdgeBookkeeping(stmt, &body);
+ EmitStackCheck(stmt, &body);
__ jmp(&body);
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1296,7 +1140,7 @@ void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitBackEdgeBookkeeping(stmt, &body);
+ EmitStackCheck(stmt, &body);
__ bind(&test);
VisitForControl(stmt->cond(),
@@ -1342,7 +1186,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
SetStatementPosition(stmt);
// Check stack before looping.
- EmitBackEdgeBookkeeping(stmt, &body);
+ EmitStackCheck(stmt, &body);
__ bind(&test);
if (stmt->cond() != NULL) {
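
This revert renames the back-edge bookkeeping back to explicit stack checks. A comment-only sketch of the loop shape full-codegen emits after the change (architecture-neutral pseudo-assembly, an illustration rather than real emitted output):

    //   body:  ...loop body...
    //          EmitStackCheck(stmt, &body)  ; interrupt/stack-limit probe
    //   test:  ...loop condition...
    //          branch-if-true body
    // RecordStackCheck(ast_id) pairs the AST id with masm_->pc_offset(),
    // which is what later lets OSR map a check back to its loop.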
diff --git a/deps/v8/src/full-codegen.h b/deps/v8/src/full-codegen.h
index 2f8818480..89b51f958 100644
--- a/deps/v8/src/full-codegen.h
+++ b/deps/v8/src/full-codegen.h
@@ -396,15 +396,9 @@ class FullCodeGenerator: public AstVisitor {
void VisitInDuplicateContext(Expression* expr);
void VisitDeclarations(ZoneList<Declaration*>* declarations);
- void DeclareModules(Handle<FixedArray> descriptions);
void DeclareGlobals(Handle<FixedArray> pairs);
int DeclareGlobalsFlags();
- // Generate code to allocate all (including nested) modules and contexts.
- // Because of recursive linking and the presence of module alias declarations,
- // this has to be a separate pass _before_ populating or executing any module.
- void AllocateModules(ZoneList<Declaration*>* declarations);
-
// Try to perform a comparison as a fast inlined literal compare if
// the operands allow it. Returns true if the compare operations
// has been matched and all code generated; false otherwise.
@@ -448,13 +442,14 @@ class FullCodeGenerator: public AstVisitor {
// neither a with nor a catch context.
void EmitDebugCheckDeclarationContext(Variable* variable);
+ // Platform-specific code for checking the stack limit at the back edge of
+ // a loop.
// This is meant to be called at loop back edges; |back_edge_target| is
// the jump target of the back edge and is used to approximate the amount
// of code inside the loop.
- void EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target);
- // Record the OSR AST id corresponding to a back edge in the code.
- void RecordBackEdge(BailoutId osr_ast_id);
+ void EmitStackCheck(IterationStatement* stmt, Label* back_edge_target);
+ // Record the OSR AST id corresponding to a stack check in the code.
+ void RecordStackCheck(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
@@ -809,12 +804,8 @@ class FullCodeGenerator: public AstVisitor {
NestedStatement* nesting_stack_;
int loop_depth_;
ZoneList<Handle<Object> >* globals_;
- Handle<FixedArray> modules_;
- int module_index_;
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
- // TODO(svenpanne) Rename this to something like back_edges_ and rename
- // related functions accordingly.
ZoneList<BailoutEntry> stack_checks_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
diff --git a/deps/v8/src/global-handles.cc b/deps/v8/src/global-handles.cc
index 392a1810b..c09ba4b47 100644
--- a/deps/v8/src/global-handles.cc
+++ b/deps/v8/src/global-handles.cc
@@ -69,7 +69,6 @@ class GlobalHandles::Node {
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
index_ = 0;
independent_ = false;
- partially_dependent_ = false;
in_new_space_list_ = false;
parameter_or_next_free_.next_free = NULL;
callback_ = NULL;
@@ -90,7 +89,6 @@ class GlobalHandles::Node {
object_ = object;
class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
independent_ = false;
- partially_dependent_ = false;
state_ = NORMAL;
parameter_or_next_free_.parameter = NULL;
callback_ = NULL;
@@ -156,15 +154,6 @@ class GlobalHandles::Node {
}
bool is_independent() const { return independent_; }
- void MarkPartiallyDependent(GlobalHandles* global_handles) {
- ASSERT(state_ != FREE);
- if (global_handles->isolate()->heap()->InNewSpace(object_)) {
- partially_dependent_ = true;
- }
- }
- bool is_partially_dependent() const { return partially_dependent_; }
- void clear_partially_dependent() { partially_dependent_ = false; }
-
// In-new-space-list flag accessors.
void set_in_new_space_list(bool v) { in_new_space_list_ = v; }
bool is_in_new_space_list() const { return in_new_space_list_; }
@@ -271,7 +260,6 @@ class GlobalHandles::Node {
State state_ : 4;
bool independent_ : 1;
- bool partially_dependent_ : 1;
bool in_new_space_list_ : 1;
// Handle specific callback.
@@ -460,11 +448,6 @@ void GlobalHandles::MarkIndependent(Object** location) {
}
-void GlobalHandles::MarkPartiallyDependent(Object** location) {
- Node::FromLocation(location)->MarkPartiallyDependent(this);
-}
-
-
bool GlobalHandles::IsIndependent(Object** location) {
return Node::FromLocation(location)->is_independent();
}
@@ -518,9 +501,8 @@ void GlobalHandles::IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
if (node->IsStrongRetainer() ||
- (node->IsWeakRetainer() && !node->is_independent() &&
- !node->is_partially_dependent())) {
- v->VisitPointer(node->location());
+ (node->IsWeakRetainer() && !node->is_independent())) {
+ v->VisitPointer(node->location());
}
}
}
@@ -531,8 +513,8 @@ void GlobalHandles::IdentifyNewSpaceWeakIndependentHandles(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeak() && f(isolate_->heap(), node->location())) {
+ if (node->is_independent() && node->IsWeak() &&
+ f(isolate_->heap(), node->location())) {
node->MarkPending();
}
}
@@ -543,61 +525,15 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if ((node->is_independent() || node->is_partially_dependent()) &&
- node->IsWeakRetainer()) {
+ if (node->is_independent() && node->IsWeakRetainer()) {
v->VisitPointer(node->location());
}
}
}
-bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
- WeakSlotCallbackWithHeap can_skip) {
- int last = 0;
- bool any_group_was_visited = false;
- for (int i = 0; i < object_groups_.length(); i++) {
- ObjectGroup* entry = object_groups_.at(i);
- ASSERT(entry != NULL);
-
- Object*** objects = entry->objects_;
- bool group_should_be_visited = false;
- for (size_t j = 0; j < entry->length_; j++) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- if (!can_skip(isolate_->heap(), &object)) {
- group_should_be_visited = true;
- break;
- }
- }
- }
-
- if (!group_should_be_visited) {
- object_groups_[last++] = entry;
- continue;
- }
-
- // An object in the group requires visiting, so iterate over all
- // objects in the group.
- for (size_t j = 0; j < entry->length_; ++j) {
- Object* object = *objects[j];
- if (object->IsHeapObject()) {
- v->VisitPointer(&object);
- any_group_was_visited = true;
- }
- }
-
- // Once the entire group has been iterated over, set the object
- // group to NULL so it won't be processed again.
- entry->Dispose();
- object_groups_.at(i) = NULL;
- }
- object_groups_.Rewind(last);
- return any_group_was_visited;
-}
-
-
bool GlobalHandles::PostGarbageCollectionProcessing(
- GarbageCollector collector, GCTracer* tracer) {
+ GarbageCollector collector) {
// Process weak global handle callbacks. This must be done after the
// GC is completely done, because the callbacks may invoke arbitrary
// API functions.
@@ -611,10 +547,7 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
// Skip dependent handles. Their weak callbacks might expect to be
// called between two global garbage collection callbacks which
// are not called for minor collections.
- if (!node->is_independent() && !node->is_partially_dependent()) {
- continue;
- }
- node->clear_partially_dependent();
+ if (!node->is_independent()) continue;
if (node->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// Weak callback triggered another GC and another round of
@@ -630,7 +563,6 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
}
} else {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- it.node()->clear_partially_dependent();
if (it.node()->PostGarbageCollectionProcessing(isolate_, this)) {
if (initial_post_gc_processing_count != post_gc_processing_count_) {
// See the comment above.
@@ -647,17 +579,10 @@ bool GlobalHandles::PostGarbageCollectionProcessing(
for (int i = 0; i < new_space_nodes_.length(); ++i) {
Node* node = new_space_nodes_[i];
ASSERT(node->is_in_new_space_list());
- if (node->IsRetainer()) {
- if (isolate_->heap()->InNewSpace(node->object())) {
- new_space_nodes_[last++] = node;
- tracer->increment_nodes_copied_in_new_space();
- } else {
- node->set_in_new_space_list(false);
- tracer->increment_nodes_promoted();
- }
+ if (node->IsRetainer() && isolate_->heap()->InNewSpace(node->object())) {
+ new_space_nodes_[last++] = node;
} else {
node->set_in_new_space_list(false);
- tracer->increment_nodes_died_in_new_space();
}
}
new_space_nodes_.Rewind(last);
@@ -685,7 +610,7 @@ void GlobalHandles::IterateAllRoots(ObjectVisitor* v) {
void GlobalHandles::IterateAllRootsWithClassIds(ObjectVisitor* v) {
for (NodeIterator it(this); !it.done(); it.Advance()) {
- if (it.node()->IsRetainer() && it.node()->has_wrapper_class_id()) {
+ if (it.node()->has_wrapper_class_id() && it.node()->IsRetainer()) {
v->VisitEmbedderReference(it.node()->location(),
it.node()->wrapper_class_id());
}
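
With partially-dependent tracking gone, only fully independent weak handles take part in the new-space (scavenge) processing paths above. A sketch of how an embedder opts a handle in through the 3.14-era public API (object and callback names are placeholders):

    v8::Persistent<v8::Object> wrapper =
        v8::Persistent<v8::Object>::New(some_object);
    wrapper.MakeWeak(my_state, MyWeakCallback);  // weak, with a callback
    wrapper.MarkIndependent();  // eligible for scavenge-time processing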
diff --git a/deps/v8/src/global-handles.h b/deps/v8/src/global-handles.h
index 7808d16a0..866317ee1 100644
--- a/deps/v8/src/global-handles.h
+++ b/deps/v8/src/global-handles.h
@@ -155,9 +155,6 @@ class GlobalHandles {
// Clear the weakness of a global handle.
void MarkIndependent(Object** location);
- // Mark the reference to this object externaly unreachable.
- void MarkPartiallyDependent(Object** location);
-
static bool IsIndependent(Object** location);
// Tells whether global handle is near death.
@@ -168,8 +165,7 @@ class GlobalHandles {
// Process pending weak handles.
// Returns true if next major GC is likely to collect more garbage.
- bool PostGarbageCollectionProcessing(GarbageCollector collector,
- GCTracer* tracer);
+ bool PostGarbageCollectionProcessing(GarbageCollector collector);
// Iterates over all strong handles.
void IterateStrongRoots(ObjectVisitor* v);
@@ -199,22 +195,16 @@ class GlobalHandles {
// Iterates over strong and dependent handles. See the note above.
void IterateNewSpaceStrongAndDependentRoots(ObjectVisitor* v);
- // Finds weak independent or partially independent handles satisfying
- // the callback predicate and marks them as pending. See the note above.
+ // Finds weak independent handles satisfying the callback predicate
+ // and marks them as pending. See the note above.
void IdentifyNewSpaceWeakIndependentHandles(WeakSlotCallbackWithHeap f);
- // Iterates over weak independent or partially independent handles.
- // See the note above.
+ // Iterates over weak independent handles. See the note above.
void IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v);
- // Iterate over objects in object groups that have at least one object
- // which requires visiting. The callback has to return true if objects
- // can be skipped and false otherwise.
- bool IterateObjectGroups(ObjectVisitor* v, WeakSlotCallbackWithHeap can_skip);
-
// Add an object group.
// Should be only used in GC callback function before a collection.
- // All groups are destroyed after a garbage collection.
+ // All groups are destroyed after a mark-compact collection.
void AddObjectGroup(Object*** handles,
size_t length,
v8::RetainedObjectInfo* info);
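
Note the lifetime change above: object groups now survive only until the next mark-compact collection. A hedged usage sketch via the 3.14-era entry point V8::AddObjectGroup (handle names are placeholders):

    // Register inside a GC prologue callback, before each collection:
    v8::Persistent<v8::Value> group[] = { node_a, node_b };
    v8::V8::AddObjectGroup(group, 2);
    // The group is destroyed after a mark-compact collection, so it
    // must be re-registered before the next one.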
diff --git a/deps/v8/src/handles.cc b/deps/v8/src/handles.cc
index 7397cc00b..46399d65e 100644
--- a/deps/v8/src/handles.cc
+++ b/deps/v8/src/handles.cc
@@ -229,12 +229,12 @@ Handle<Object> SetPrototype(Handle<JSFunction> function,
}
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
+Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
StrictModeFlag strict_mode) {
+ Isolate* isolate = Isolate::Current();
CALL_HEAP_FUNCTION(
isolate,
Runtime::SetObjectProperty(
@@ -593,25 +593,6 @@ v8::Handle<v8::Array> GetKeysForIndexedInterceptor(Handle<JSReceiver> receiver,
}
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script) {
- Isolate* isolate = script->GetIsolate();
- Handle<String> name_or_source_url_key =
- isolate->factory()->LookupAsciiSymbol("nameOrSourceURL");
- Handle<JSValue> script_wrapper = GetScriptWrapper(script);
- Handle<Object> property = GetProperty(script_wrapper,
- name_or_source_url_key);
- ASSERT(property->IsJSFunction());
- Handle<JSFunction> method = Handle<JSFunction>::cast(property);
- bool caught_exception;
- Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
- NULL, &caught_exception);
- if (caught_exception) {
- result = isolate->factory()->undefined_value();
- }
- return result;
-}
-
-
static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
int len = array->length();
for (int i = 0; i < len; i++) {
@@ -915,7 +896,7 @@ int Utf8LengthHelper(String* input,
int total = 0;
bool dummy;
while (true) {
- if (input->IsOneByteRepresentation()) {
+ if (input->IsAsciiRepresentation()) {
*starts_with_surrogate = false;
return total + to - from;
}
@@ -948,14 +929,14 @@ int Utf8LengthHelper(String* input,
} else {
if (first_length > from) {
// Left hand side is shorter.
- if (first->IsOneByteRepresentation()) {
+ if (first->IsAsciiRepresentation()) {
total += first_length - from;
*starts_with_surrogate = false;
starts_with_surrogate = &dummy;
input = second;
from = 0;
to -= first_length;
- } else if (second->IsOneByteRepresentation()) {
+ } else if (second->IsAsciiRepresentation()) {
followed_by_surrogate = false;
total += to - first_length;
input = first;
diff --git a/deps/v8/src/handles.h b/deps/v8/src/handles.h
index 032fbe481..a1d88c2f8 100644
--- a/deps/v8/src/handles.h
+++ b/deps/v8/src/handles.h
@@ -95,13 +95,6 @@ class Handle {
};
-// Convenience wrapper.
-template<class T>
-inline Handle<T> handle(T* t, Isolate* isolate) {
- return Handle<T>(t, isolate);
-}
-
-
class DeferredHandles;
class HandleScopeImplementer;
@@ -216,8 +209,7 @@ Handle<String> FlattenGetString(Handle<String> str);
int Utf8Length(Handle<String> str);
-Handle<Object> SetProperty(Isolate* isolate,
- Handle<Object> object,
+Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes,
@@ -268,7 +260,6 @@ int GetScriptLineNumber(Handle<Script> script, int code_position);
// The safe version does not make heap allocations but may work much slower.
int GetScriptLineNumberSafe(Handle<Script> script, int code_position);
int GetScriptColumnNumber(Handle<Script> script, int code_position);
-Handle<Object> GetScriptNameOrSourceURL(Handle<Script> script);
// Computes the enumerable keys from interceptors. Used for debug mirrors and
// by GetKeysInFixedArrayFor below.
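
The SetProperty revert drops the explicit isolate argument in favor of Isolate::Current(). A call-site sketch (hypothetical handles):

    Handle<Object> result =
        SetProperty(receiver, key, value, NONE, kNonStrictMode);
    // The isolate is looked up internally via Isolate::Current(), so
    // callers no longer thread it through explicitly.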
diff --git a/deps/v8/src/heap-inl.h b/deps/v8/src/heap-inl.h
index de47c94a8..bace902d4 100644
--- a/deps/v8/src/heap-inl.h
+++ b/deps/v8/src/heap-inl.h
@@ -91,7 +91,7 @@ MaybeObject* Heap::AllocateStringFromUtf8(Vector<const char> str,
if (non_ascii_start >= length) {
// If the string is ASCII, we do not need to convert the characters
// since UTF8 is backwards compatible with ASCII.
- return AllocateStringFromOneByte(str, pretenure);
+ return AllocateStringFromAscii(str, pretenure);
}
// Non-ASCII and we need to decode.
return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);
@@ -109,12 +109,12 @@ MaybeObject* Heap::AllocateSymbol(Vector<const char> str,
MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
uint32_t hash_field) {
- if (str.length() > SeqOneByteString::kMaxLength) {
+ if (str.length() > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
// Compute map and object size.
Map* map = ascii_symbol_map();
- int size = SeqOneByteString::SizeFor(str.length());
+ int size = SeqAsciiString::SizeFor(str.length());
// Allocate string.
Object* result;
@@ -134,7 +134,7 @@ MaybeObject* Heap::AllocateAsciiSymbol(Vector<const char> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
- memcpy(answer->address() + SeqOneByteString::kHeaderSize,
+ memcpy(answer->address() + SeqAsciiString::kHeaderSize,
str.start(), str.length());
return answer;
@@ -460,7 +460,7 @@ intptr_t Heap::AdjustAmountOfExternalAllocatedMemory(
intptr_t change_in_bytes) {
ASSERT(HasBeenSetUp());
intptr_t amount = amount_of_external_allocated_memory_ + change_in_bytes;
- if (change_in_bytes > 0) {
+ if (change_in_bytes >= 0) {
// Avoid overflow.
if (amount > amount_of_external_allocated_memory_) {
amount_of_external_allocated_memory_ = amount;
@@ -607,7 +607,7 @@ void ExternalStringTable::Verify() {
Object* obj = Object::cast(new_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
@@ -617,7 +617,7 @@ void ExternalStringTable::Verify() {
Object* obj = Object::cast(old_space_strings_[i]);
// TODO(yangguo): check that the object is indeed an external string.
ASSERT(!heap_->InNewSpace(obj));
- ASSERT(obj != HEAP->the_hole_value());
+ ASSERT(obj != HEAP->raw_unchecked_the_hole_value());
if (obj->IsExternalAsciiString()) {
ExternalAsciiString* string = ExternalAsciiString::cast(obj);
ASSERT(String::IsAscii(string->GetChars(), string->length()));
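
The AllocateStringFromUtf8 fast path above works because ASCII is a strict subset of UTF-8: if every byte is below 0x80, the raw bytes already are the final one-byte string. A sketch of the test, mirroring the hunk:

    int non_ascii_start = String::NonAsciiStart(str.start(), str.length());
    if (non_ascii_start >= str.length()) {
      // All bytes < 0x80: valid UTF-8 and valid ASCII, copy verbatim.
      return AllocateStringFromAscii(str, pretenure);
    }
    // Otherwise decode, starting from the first non-ASCII byte.
    return AllocateStringFromUtf8Slow(str, non_ascii_start, pretenure);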
diff --git a/deps/v8/src/heap-profiler.cc b/deps/v8/src/heap-profiler.cc
index 45a93f9e1..301b09993 100644
--- a/deps/v8/src/heap-profiler.cc
+++ b/deps/v8/src/heap-profiler.cc
@@ -65,29 +65,23 @@ void HeapProfiler::TearDown() {
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(const char* name,
+ int type,
+ v8::ActivityControl* control) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control,
- resolver);
+ control);
}
-HeapSnapshot* HeapProfiler::TakeSnapshot(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
+HeapSnapshot* HeapProfiler::TakeSnapshot(String* name,
+ int type,
+ v8::ActivityControl* control) {
ASSERT(Isolate::Current()->heap_profiler() != NULL);
return Isolate::Current()->heap_profiler()->TakeSnapshotImpl(name,
type,
- control,
- resolver);
+ control);
}
@@ -128,18 +122,16 @@ v8::RetainedObjectInfo* HeapProfiler::ExecuteWrapperClassCallback(
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(const char* name,
+ int type,
+ v8::ActivityControl* control) {
HeapSnapshot::Type s_type = static_cast<HeapSnapshot::Type>(type);
HeapSnapshot* result =
snapshots_->NewSnapshot(s_type, name, next_snapshot_uid_++);
bool generation_completed = true;
switch (s_type) {
case HeapSnapshot::kFull: {
- HeapSnapshotGenerator generator(result, control, resolver);
+ HeapSnapshotGenerator generator(result, control);
generation_completed = generator.GenerateSnapshot();
break;
}
@@ -155,13 +147,10 @@ HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
}
-HeapSnapshot* HeapProfiler::TakeSnapshotImpl(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver) {
- return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control,
- resolver);
+HeapSnapshot* HeapProfiler::TakeSnapshotImpl(String* name,
+ int type,
+ v8::ActivityControl* control) {
+ return TakeSnapshotImpl(snapshots_->names()->GetName(name), type, control);
}
void HeapProfiler::StartHeapObjectsTrackingImpl() {
diff --git a/deps/v8/src/heap-profiler.h b/deps/v8/src/heap-profiler.h
index 9d3ba6f11..346177b8b 100644
--- a/deps/v8/src/heap-profiler.h
+++ b/deps/v8/src/heap-profiler.h
@@ -51,16 +51,12 @@ class HeapProfiler {
static size_t GetMemorySizeUsedByProfiler();
- static HeapSnapshot* TakeSnapshot(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- static HeapSnapshot* TakeSnapshot(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
+ static HeapSnapshot* TakeSnapshot(const char* name,
+ int type,
+ v8::ActivityControl* control);
+ static HeapSnapshot* TakeSnapshot(String* name,
+ int type,
+ v8::ActivityControl* control);
static void StartHeapObjectsTracking();
static void StopHeapObjectsTracking();
@@ -85,16 +81,12 @@ class HeapProfiler {
private:
HeapProfiler();
~HeapProfiler();
- HeapSnapshot* TakeSnapshotImpl(
- const char* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
- HeapSnapshot* TakeSnapshotImpl(
- String* name,
- int type,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
+ HeapSnapshot* TakeSnapshotImpl(const char* name,
+ int type,
+ v8::ActivityControl* control);
+ HeapSnapshot* TakeSnapshotImpl(String* name,
+ int type,
+ v8::ActivityControl* control);
void ResetSnapshots();
void StartHeapObjectsTrackingImpl();
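
The resolver parameter disappears from every snapshot entry point. Through the public 3.14-era API, a full snapshot is then taken like this (sketch; the title is a placeholder and the ActivityControl is omitted):

    const v8::HeapSnapshot* snapshot = v8::HeapProfiler::TakeSnapshot(
        v8::String::New("my-snapshot"),  // title
        v8::HeapSnapshot::kFull,         // snapshot type
        NULL);                           // optional ActivityControl*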
diff --git a/deps/v8/src/heap.cc b/deps/v8/src/heap.cc
index 746c9b6d6..e3fcb93a7 100644
--- a/deps/v8/src/heap.cc
+++ b/deps/v8/src/heap.cc
@@ -117,6 +117,7 @@ Heap::Heap()
allocation_allowed_(true),
allocation_timeout_(0),
disallow_allocation_failure_(false),
+ debug_utils_(NULL),
#endif // DEBUG
new_space_high_promotion_mode_active_(false),
old_gen_promotion_limit_(kMinimumPromotionLimit),
@@ -136,7 +137,6 @@ Heap::Heap()
tracer_(NULL),
young_survivors_after_last_gc_(0),
high_survival_rate_period_length_(0),
- low_survival_rate_period_length_(0),
survival_rate_(0),
previous_survival_rate_trend_(Heap::STABLE),
survival_rate_trend_(Heap::STABLE),
@@ -212,20 +212,6 @@ intptr_t Heap::CommittedMemory() {
lo_space_->Size();
}
-
-size_t Heap::CommittedPhysicalMemory() {
- if (!HasBeenSetUp()) return 0;
-
- return new_space_.CommittedPhysicalMemory() +
- old_pointer_space_->CommittedPhysicalMemory() +
- old_data_space_->CommittedPhysicalMemory() +
- code_space_->CommittedPhysicalMemory() +
- map_space_->CommittedPhysicalMemory() +
- cell_space_->CommittedPhysicalMemory() +
- lo_space_->CommittedPhysicalMemory();
-}
-
-
intptr_t Heap::CommittedMemoryExecutable() {
if (!HasBeenSetUp()) return 0;
@@ -420,10 +406,6 @@ void Heap::GarbageCollectionPrologue() {
gc_count_++;
unflattened_strings_length_ = 0;
- if (FLAG_flush_code && FLAG_flush_code_incrementally) {
- mark_compact_collector()->EnableCodeFlushing(true);
- }
-
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) {
Verify();
@@ -615,7 +597,7 @@ bool Heap::CollectGarbage(AllocationSpace space,
}
if (collector == MARK_COMPACTOR &&
- !mark_compact_collector()->abort_incremental_marking() &&
+ !mark_compact_collector()->abort_incremental_marking_ &&
!incremental_marking()->IsStopped() &&
!incremental_marking()->should_hurry() &&
FLAG_incremental_marking_steps) {
@@ -643,24 +625,24 @@ bool Heap::CollectGarbage(AllocationSpace space,
// Tell the tracer which collector we've selected.
tracer.set_collector(collector);
- {
- HistogramTimerScope histogram_timer_scope(
- (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
- : isolate_->counters()->gc_compactor());
- next_gc_likely_to_collect_more =
- PerformGarbageCollection(collector, &tracer);
- }
+ HistogramTimer* rate = (collector == SCAVENGER)
+ ? isolate_->counters()->gc_scavenger()
+ : isolate_->counters()->gc_compactor();
+ rate->Start();
+ next_gc_likely_to_collect_more =
+ PerformGarbageCollection(collector, &tracer);
+ rate->Stop();
+
+ ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
+ // This can do debug callbacks and restart incremental marking.
GarbageCollectionEpilogue();
}
- // Start incremental marking for the next cycle. The heap snapshot
- // generator needs incremental marking to stay off after it aborted.
- if (!mark_compact_collector()->abort_incremental_marking() &&
- incremental_marking()->IsStopped() &&
- incremental_marking()->WorthActivating() &&
- NextGCIsLikelyToBeFull()) {
- incremental_marking()->Start();
+ if (incremental_marking()->IsStopped()) {
+ if (incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull()) {
+ incremental_marking()->Start();
+ }
}
return next_gc_likely_to_collect_more;
@@ -957,16 +939,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
isolate_->counters()->objs_since_last_young()->Set(0);
- // Callbacks that fire after this point might trigger nested GCs and
- // restart incremental marking, the assertion can't be moved down.
- ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
-
gc_post_processing_depth_++;
{ DisableAssertNoAllocation allow_allocation;
GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
next_gc_likely_to_collect_more =
- isolate_->global_handles()->PostGarbageCollectionProcessing(
- collector, tracer);
+ isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
}
gc_post_processing_depth_--;
@@ -1328,23 +1305,10 @@ void Heap::Scavenge() {
}
}
- // Copy objects reachable from the code flushing candidates list.
- MarkCompactCollector* collector = mark_compact_collector();
- if (collector->is_code_flushing_enabled()) {
- collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
- }
-
// Scavenge object reachable from the native contexts list directly.
scavenge_visitor.VisitPointer(BitCast<Object**>(&native_contexts_list_));
new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
- while (isolate()->global_handles()->IterateObjectGroups(
- &scavenge_visitor, &IsUnscavengedHeapObject)) {
- new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
- }
- isolate()->global_handles()->RemoveObjectGroups();
-
isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
&IsUnscavengedHeapObject);
isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
@@ -1584,40 +1548,13 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
AssertNoAllocation no_allocation;
- // Both the external string table and the symbol table may contain
- // external strings, but neither lists them exhaustively, nor is the
- // intersection set empty. Therefore we iterate over the external string
- // table first, ignoring symbols, and then over the symbol table.
-
- class ExternalStringTableVisitorAdapter : public ObjectVisitor {
- public:
- explicit ExternalStringTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
- virtual void VisitPointers(Object** start, Object** end) {
- for (Object** p = start; p < end; p++) {
- // Visit non-symbol external strings,
- // since symbols are listed in the symbol table.
- if (!(*p)->IsSymbol()) {
- ASSERT((*p)->IsExternalString());
- visitor_->VisitExternalString(Utils::ToLocal(
- Handle<String>(String::cast(*p))));
- }
- }
- }
- private:
- v8::ExternalResourceVisitor* visitor_;
- } external_string_table_visitor(visitor);
-
- external_string_table_.Iterate(&external_string_table_visitor);
-
- class SymbolTableVisitorAdapter : public ObjectVisitor {
+ class VisitorAdapter : public ObjectVisitor {
public:
- explicit SymbolTableVisitorAdapter(
- v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
+ explicit VisitorAdapter(v8::ExternalResourceVisitor* visitor)
+ : visitor_(visitor) {}
virtual void VisitPointers(Object** start, Object** end) {
for (Object** p = start; p < end; p++) {
if ((*p)->IsExternalString()) {
- ASSERT((*p)->IsSymbol());
visitor_->VisitExternalString(Utils::ToLocal(
Handle<String>(String::cast(*p))));
}
@@ -1625,9 +1562,8 @@ void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
}
private:
v8::ExternalResourceVisitor* visitor_;
- } symbol_table_visitor(visitor);
-
- symbol_table()->IterateElements(&symbol_table_visitor);
+ } visitor_adapter(visitor);
+ external_string_table_.Iterate(&visitor_adapter);
}
@@ -1724,7 +1660,7 @@ template<MarksHandling marks_handling,
class ScavengingVisitor : public StaticVisitorBase {
public:
static void Initialize() {
- table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
+ table_.Register(kVisitSeqAsciiString, &EvacuateSeqAsciiString);
table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
table_.Register(kVisitByteArray, &EvacuateByteArray);
@@ -1968,11 +1904,11 @@ class ScavengingVisitor : public StaticVisitorBase {
}
- static inline void EvacuateSeqOneByteString(Map* map,
+ static inline void EvacuateSeqAsciiString(Map* map,
HeapObject** slot,
HeapObject* object) {
- int object_size = SeqOneByteString::cast(object)->
- SeqOneByteStringSize(map->instance_type());
+ int object_size = SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
EvacuateObject<DATA_OBJECT, UNKNOWN_SIZE, kObjectAlignment>(
map, slot, object, object_size);
}
@@ -2539,14 +2475,6 @@ bool Heap::CreateInitialMaps() {
}
set_message_object_map(Map::cast(obj));
- Map* external_map;
- { MaybeObject* maybe_obj =
- AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize);
- if (!maybe_obj->To(&external_map)) return false;
- }
- external_map->set_is_extensible(false);
- set_external_map(external_map);
-
ASSERT(!InNewSpace(empty_fixed_array()));
return true;
}
@@ -2765,7 +2693,7 @@ bool Heap::CreateInitialObjects() {
set_termination_exception(obj);
// Allocate the empty string.
- { MaybeObject* maybe_obj = AllocateRawOneByteString(0, TENURED);
+ { MaybeObject* maybe_obj = AllocateRawAsciiString(0, TENURED);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_empty_string(String::cast(obj));
@@ -2864,15 +2792,6 @@ bool Heap::CreateInitialObjects() {
}
set_natives_source_cache(FixedArray::cast(obj));
- // Allocate object to hold object observation state.
- { MaybeObject* maybe_obj = AllocateMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- { MaybeObject* maybe_obj = AllocateJSObjectFromMap(Map::cast(obj));
- if (!maybe_obj->ToObject(&obj)) return false;
- }
- set_observation_state(JSObject::cast(obj));
-
// Handling of script id generation is in FACTORY->NewScript.
set_last_script_id(undefined_value());
@@ -2892,34 +2811,6 @@ bool Heap::CreateInitialObjects() {
}
-bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
- RootListIndex writable_roots[] = {
- kStoreBufferTopRootIndex,
- kStackLimitRootIndex,
- kInstanceofCacheFunctionRootIndex,
- kInstanceofCacheMapRootIndex,
- kInstanceofCacheAnswerRootIndex,
- kCodeStubsRootIndex,
- kNonMonomorphicCacheRootIndex,
- kPolymorphicCodeCacheRootIndex,
- kLastScriptIdRootIndex,
- kEmptyScriptRootIndex,
- kRealStackLimitRootIndex,
- kArgumentsAdaptorDeoptPCOffsetRootIndex,
- kConstructStubDeoptPCOffsetRootIndex,
- kGetterStubDeoptPCOffsetRootIndex,
- kSetterStubDeoptPCOffsetRootIndex,
- kSymbolTableRootIndex,
- };
-
- for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
- if (root_index == writable_roots[i])
- return true;
- }
- return false;
-}
-
-
Object* RegExpResultsCache::Lookup(Heap* heap,
String* key_string,
Object* key_pattern,
@@ -3137,7 +3028,7 @@ MaybeObject* Heap::NumberToString(Object* number,
}
Object* js_string;
- MaybeObject* maybe_js_string = AllocateStringFromOneByte(CStrVector(str));
+ MaybeObject* maybe_js_string = AllocateStringFromAscii(CStrVector(str));
if (maybe_js_string->ToObject(&js_string)) {
SetNumberStringCache(number, String::cast(js_string));
}
@@ -3311,10 +3202,10 @@ MUST_USE_RESULT static inline MaybeObject* MakeOrFindTwoCharacterString(
} else if ((c1 | c2) <= String::kMaxAsciiCharCodeU) { // We can do this
ASSERT(IsPowerOf2(String::kMaxAsciiCharCodeU + 1)); // because of this.
Object* result;
- { MaybeObject* maybe_result = heap->AllocateRawOneByteString(2);
+ { MaybeObject* maybe_result = heap->AllocateRawAsciiString(2);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
- char* dest = SeqOneByteString::cast(result)->GetChars();
+ char* dest = SeqAsciiString::cast(result)->GetChars();
dest[0] = c1;
dest[1] = c2;
return result;
@@ -3353,8 +3244,8 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
return MakeOrFindTwoCharacterString(this, c1, c2);
}
- bool first_is_ascii = first->IsOneByteRepresentation();
- bool second_is_ascii = second->IsOneByteRepresentation();
+ bool first_is_ascii = first->IsAsciiRepresentation();
+ bool second_is_ascii = second->IsAsciiRepresentation();
bool is_ascii = first_is_ascii && second_is_ascii;
// Make sure that an out of memory exception is thrown if the length
@@ -3384,35 +3275,35 @@ MaybeObject* Heap::AllocateConsString(String* first, String* second) {
ASSERT(second->IsFlat());
if (is_ascii) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
+ { MaybeObject* maybe_result = AllocateRawAsciiString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqOneByteString::cast(result)->GetChars();
+ char* dest = SeqAsciiString::cast(result)->GetChars();
// Copy first part.
const char* src;
if (first->IsExternalString()) {
src = ExternalAsciiString::cast(first)->GetChars();
} else {
- src = SeqOneByteString::cast(first)->GetChars();
+ src = SeqAsciiString::cast(first)->GetChars();
}
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
if (second->IsExternalString()) {
src = ExternalAsciiString::cast(second)->GetChars();
} else {
- src = SeqOneByteString::cast(second)->GetChars();
+ src = SeqAsciiString::cast(second)->GetChars();
}
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result;
} else {
if (is_ascii_data_in_two_byte_string) {
Object* result;
- { MaybeObject* maybe_result = AllocateRawOneByteString(length);
+ { MaybeObject* maybe_result = AllocateRawAsciiString(length);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- char* dest = SeqOneByteString::cast(result)->GetChars();
+ char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length);
String::WriteToFlat(second, dest + first_length, 0, second_length);
isolate_->counters()->string_add_runtime_ext_to_ascii()->Increment();
@@ -3479,17 +3370,17 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// WriteToFlat takes care of the case when an indirect string has a
// different encoding from its underlying string. These encodings may
// differ because of externalization.
- bool is_ascii = buffer->IsOneByteRepresentation();
+ bool is_ascii = buffer->IsAsciiRepresentation();
{ MaybeObject* maybe_result = is_ascii
- ? AllocateRawOneByteString(length, pretenure)
+ ? AllocateRawAsciiString(length, pretenure)
: AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
String* string_result = String::cast(result);
// Copy the characters into the new object.
if (is_ascii) {
- ASSERT(string_result->IsOneByteRepresentation());
- char* dest = SeqOneByteString::cast(string_result)->GetChars();
+ ASSERT(string_result->IsAsciiRepresentation());
+ char* dest = SeqAsciiString::cast(string_result)->GetChars();
String::WriteToFlat(buffer, dest, start, end);
} else {
ASSERT(string_result->IsTwoByteRepresentation());
@@ -3513,7 +3404,7 @@ MaybeObject* Heap::AllocateSubString(String* buffer,
// indirect ASCII string is pointing to a two-byte string, the two-byte char
// codes of the underlying string must still fit into ASCII (because
// externalization must not change char codes).
- { Map* map = buffer->IsOneByteRepresentation()
+ { Map* map = buffer->IsAsciiRepresentation()
? sliced_ascii_string_map()
: sliced_string_map();
MaybeObject* maybe_result = Allocate(map, NEW_SPACE);
@@ -3748,11 +3639,10 @@ MaybeObject* Heap::CreateCode(const CodeDesc& desc,
code->set_check_type(RECEIVER_MAP_CHECK);
}
code->set_deoptimization_data(empty_fixed_array(), SKIP_WRITE_BARRIER);
- code->InitializeTypeFeedbackInfoNoWriteBarrier(undefined_value());
+ code->set_type_feedback_info(undefined_value(), SKIP_WRITE_BARRIER);
code->set_handler_table(empty_fixed_array(), SKIP_WRITE_BARRIER);
code->set_gc_metadata(Smi::FromInt(0));
code->set_ic_age(global_ic_age_);
- code->set_prologue_offset(kPrologueOffsetNotSet);
// Allow self references to created code object by patching the handle to
// point to the newly allocated Code object.
if (!self_reference.is_null()) {
@@ -4143,7 +4033,7 @@ MaybeObject* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
InitializeJSObjectFromMap(JSObject::cast(obj),
FixedArray::cast(properties),
map);
- ASSERT(JSObject::cast(obj)->HasFastElements());
+ ASSERT(JSObject::cast(obj)->HasFastSmiOrObjectElements());
return obj;
}
@@ -4193,6 +4083,9 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
ArrayStorageAllocationMode mode,
PretenureFlag pretenure) {
ASSERT(capacity >= length);
+ if (length != 0 && mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
@@ -4205,7 +4098,7 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
FixedArrayBase* elms;
MaybeObject* maybe_elms = NULL;
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
maybe_elms = AllocateUninitializedFixedDoubleArray(capacity);
} else {
@@ -4232,14 +4125,13 @@ MaybeObject* Heap::AllocateJSArrayAndStorage(
MaybeObject* Heap::AllocateJSArrayWithElements(
FixedArrayBase* elements,
ElementsKind elements_kind,
- int length,
PretenureFlag pretenure) {
MaybeObject* maybe_array = AllocateJSArray(elements_kind, pretenure);
JSArray* array;
if (!maybe_array->To(&array)) return maybe_array;
array->set_elements(elements);
- array->set_length(Smi::FromInt(length));
+ array->set_length(Smi::FromInt(elements->length()));
array->ValidateElements();
return array;
}
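
After this hunk the array length is implied by the backing store rather than passed separately. A minimal sketch (hypothetical fixtures):

    // elems is some FixedArrayBase, e.g. a FixedArray of length 4.
    MaybeObject* maybe_array = heap->AllocateJSArrayWithElements(
        elems, FAST_ELEMENTS, pretenure);
    // The JSArray's length becomes Smi::FromInt(elems->length()), i.e. 4;
    // callers can no longer request a shorter logical length.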
@@ -4517,7 +4409,7 @@ MaybeObject* Heap::ReinitializeJSGlobalProxy(JSFunction* constructor,
}
-MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
+MaybeObject* Heap::AllocateStringFromAscii(Vector<const char> string,
PretenureFlag pretenure) {
int length = string.length();
if (length == 1) {
@@ -4525,12 +4417,12 @@ MaybeObject* Heap::AllocateStringFromOneByte(Vector<const char> string,
}
Object* result;
{ MaybeObject* maybe_result =
- AllocateRawOneByteString(string.length(), pretenure);
+ AllocateRawAsciiString(string.length(), pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
}
// Copy the characters into the new object.
- CopyChars(SeqOneByteString::cast(result)->GetChars(), string.start(), length);
+ CopyChars(SeqAsciiString::cast(result)->GetChars(), string.start(), length);
return result;
}
@@ -4583,9 +4475,9 @@ MaybeObject* Heap::AllocateStringFromTwoByte(Vector<const uc16> string,
const uc16* start = string.start();
if (String::IsAscii(start, length)) {
- MaybeObject* maybe_result = AllocateRawOneByteString(length, pretenure);
+ MaybeObject* maybe_result = AllocateRawAsciiString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
- CopyChars(SeqOneByteString::cast(result)->GetChars(), start, length);
+ CopyChars(SeqAsciiString::cast(result)->GetChars(), start, length);
} else { // It's not an ASCII string.
MaybeObject* maybe_result = AllocateRawTwoByteString(length, pretenure);
if (!maybe_result->ToObject(&result)) return maybe_result;
@@ -4640,11 +4532,11 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Map* map;
if (is_ascii) {
- if (chars > SeqOneByteString::kMaxLength) {
+ if (chars > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
map = ascii_symbol_map();
- size = SeqOneByteString::SizeFor(chars);
+ size = SeqAsciiString::SizeFor(chars);
} else {
if (chars > SeqTwoByteString::kMaxLength) {
return Failure::OutOfMemoryException();
@@ -4684,14 +4576,13 @@ MaybeObject* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
}
-MaybeObject* Heap::AllocateRawOneByteString(int length,
- PretenureFlag pretenure) {
- if (length < 0 || length > SeqOneByteString::kMaxLength) {
+MaybeObject* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
+ if (length < 0 || length > SeqAsciiString::kMaxLength) {
return Failure::OutOfMemoryException();
}
- int size = SeqOneByteString::SizeFor(length);
- ASSERT(size <= SeqOneByteString::kMaxSize);
+ int size = SeqAsciiString::SizeFor(length);
+ ASSERT(size <= SeqAsciiString::kMaxSize);
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
AllocationSpace retry_space = OLD_DATA_SPACE;
@@ -4723,7 +4614,7 @@ MaybeObject* Heap::AllocateRawOneByteString(int length,
if (FLAG_verify_heap) {
// Initialize string's content to ensure ASCII-ness (character range 0-127)
// as required when verifying the heap.
- char* dest = SeqOneByteString::cast(result)->GetChars();
+ char* dest = SeqAsciiString::cast(result)->GetChars();
memset(dest, 0x0F, length * kCharSize);
}
#endif
@@ -5099,7 +4990,7 @@ MaybeObject* Heap::AllocateModuleContext(ScopeInfo* scope_info) {
}
Context* context = reinterpret_cast<Context*>(result);
context->set_map_no_write_barrier(module_context_map());
- // Instance link will be set later.
+ // Context links will be set later.
context->set_extension(Smi::FromInt(0));
return context;
}
@@ -5186,20 +5077,6 @@ MaybeObject* Heap::AllocateScopeInfo(int length) {
}
-MaybeObject* Heap::AllocateExternal(void* value) {
- Foreign* foreign;
- { MaybeObject* maybe_result = AllocateForeign(static_cast<Address>(value));
- if (!maybe_result->To(&foreign)) return maybe_result;
- }
- JSObject* external;
- { MaybeObject* maybe_result = AllocateJSObjectFromMap(external_map());
- if (!maybe_result->To(&external)) return maybe_result;
- }
- external->SetInternalField(0, foreign);
- return external;
-}
-
-
MaybeObject* Heap::AllocateStruct(InstanceType type) {
Map* map;
switch (type) {
@@ -5289,6 +5166,10 @@ bool Heap::IdleNotification(int hint) {
AdvanceIdleIncrementalMarking(step_size);
contexts_disposed_ = 0;
}
+ // Make sure that we have no pending context disposals.
+ // Take into account that we might have decided to delay full collection
+ // because incremental marking is in progress.
+ ASSERT((contexts_disposed_ == 0) || !incremental_marking()->IsStopped());
// After context disposal there is likely a lot of garbage remaining, reset
// the idle notification counters in order to trigger more incremental GCs
// on subsequent idle notifications.
@@ -5580,7 +5461,7 @@ MaybeObject* Heap::LookupAsciiSymbol(Vector<const char> string) {
}
-MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqOneByteString> string,
+MaybeObject* Heap::LookupAsciiSymbol(Handle<SeqAsciiString> string,
int from,
int length) {
Object* symbol = NULL;
@@ -5639,7 +5520,6 @@ bool Heap::LookupSymbolIfExists(String* string, String** symbol) {
return symbol_table()->LookupSymbolIfExists(string, symbol);
}
-
void Heap::ZapFromSpace() {
NewSpacePageIterator it(new_space_.FromSpaceStart(),
new_space_.FromSpaceEnd());
@@ -6081,6 +5961,172 @@ intptr_t Heap::PromotedExternalMemorySize() {
- amount_of_external_allocated_memory_at_last_global_gc_;
}
+#ifdef DEBUG
+
+// Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
+static const int kMarkTag = 2;
+
+
+class HeapDebugUtils {
+ public:
+ explicit HeapDebugUtils(Heap* heap)
+ : search_for_any_global_(false),
+ search_target_(NULL),
+ found_target_(false),
+ object_stack_(20),
+ heap_(heap) {
+ }
+
+ class MarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit MarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+      // Mark all HeapObject pointers in [start, end).
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ void MarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (!map->IsHeapObject()) return; // visited before
+
+ if (found_target_) return; // stop if target found
+ object_stack_.Add(obj);
+ if ((search_for_any_global_ && obj->IsJSGlobalObject()) ||
+ (!search_for_any_global_ && (obj == search_target_))) {
+ found_target_ = true;
+ return;
+ }
+
+ // not visited yet
+ Map* map_p = reinterpret_cast<Map*>(HeapObject::cast(map));
+
+ Address map_addr = map_p->address();
+
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_addr + kMarkTag));
+
+ MarkObjectRecursively(&map);
+
+ MarkObjectVisitor mark_visitor(this);
+
+ obj->IterateBody(map_p->instance_type(), obj->SizeFromMap(map_p),
+ &mark_visitor);
+
+ if (!found_target_) // don't pop if found the target
+ object_stack_.RemoveLast();
+ }
+
+
+ class UnmarkObjectVisitor : public ObjectVisitor {
+ public:
+ explicit UnmarkObjectVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Copy all HeapObject pointers in [start, end)
+      // Unmark all HeapObject pointers in [start, end).
+ if ((*p)->IsHeapObject())
+ utils_->UnmarkObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+
+ void UnmarkObjectRecursively(Object** p) {
+ if (!(*p)->IsHeapObject()) return;
+
+ HeapObject* obj = HeapObject::cast(*p);
+
+ Object* map = obj->map();
+
+ if (map->IsHeapObject()) return; // unmarked already
+
+ Address map_addr = reinterpret_cast<Address>(map);
+
+ map_addr -= kMarkTag;
+
+ ASSERT_TAG_ALIGNED(map_addr);
+
+ HeapObject* map_p = HeapObject::FromAddress(map_addr);
+
+ obj->set_map_no_write_barrier(reinterpret_cast<Map*>(map_p));
+
+ UnmarkObjectRecursively(reinterpret_cast<Object**>(&map_p));
+
+ UnmarkObjectVisitor unmark_visitor(this);
+
+ obj->IterateBody(Map::cast(map_p)->instance_type(),
+ obj->SizeFromMap(Map::cast(map_p)),
+ &unmark_visitor);
+ }
+
+
+ void MarkRootObjectRecursively(Object** root) {
+ if (search_for_any_global_) {
+ ASSERT(search_target_ == NULL);
+ } else {
+ ASSERT(search_target_->IsHeapObject());
+ }
+ found_target_ = false;
+ object_stack_.Clear();
+
+ MarkObjectRecursively(root);
+ UnmarkObjectRecursively(root);
+
+ if (found_target_) {
+ PrintF("=====================================\n");
+ PrintF("==== Path to object ====\n");
+ PrintF("=====================================\n\n");
+
+ ASSERT(!object_stack_.is_empty());
+ for (int i = 0; i < object_stack_.length(); i++) {
+ if (i > 0) PrintF("\n |\n |\n V\n\n");
+ Object* obj = object_stack_[i];
+ obj->Print();
+ }
+ PrintF("=====================================\n");
+ }
+ }
+
+ // Helper class for visiting HeapObjects recursively.
+ class MarkRootVisitor: public ObjectVisitor {
+ public:
+ explicit MarkRootVisitor(HeapDebugUtils* utils) : utils_(utils) { }
+
+ void VisitPointers(Object** start, Object** end) {
+ // Visit all HeapObject pointers in [start, end)
+ for (Object** p = start; p < end; p++) {
+ if ((*p)->IsHeapObject())
+ utils_->MarkRootObjectRecursively(p);
+ }
+ }
+
+ HeapDebugUtils* utils_;
+ };
+
+ bool search_for_any_global_;
+ Object* search_target_;
+ bool found_target_;
+ List<Object*> object_stack_;
+ Heap* heap_;
+
+ friend class Heap;
+};
+
+#endif
+
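
The HeapDebugUtils block restored above marks a visited object by adding
kMarkTag to its map pointer: because real maps are aligned, the tagged value
no longer looks like a heap object, which is exactly the "visited before"
test in MarkObjectRecursively, and subtracting the tag restores the original
map. A self-contained model of that trick, with illustrative names:

#include <cassert>
#include <cstdint>

static const uintptr_t kMarkTag = 2;  // same tag value as the code above

static uintptr_t Mark(uintptr_t map)     { return map + kMarkTag; }
static uintptr_t Unmark(uintptr_t map)   { return map - kMarkTag; }
static bool      IsMarked(uintptr_t map) { return (map & kMarkTag) != 0; }

int main() {
  alignas(8) static int fake_map;  // stands in for an aligned Map object
  uintptr_t map = reinterpret_cast<uintptr_t>(&fake_map);
  assert(!IsMarked(map));         // aligned pointer: not visited yet
  uintptr_t tagged = Mark(map);
  assert(IsMarked(tagged));       // the "visited before" state
  assert(Unmark(tagged) == map);  // unmarking recovers the real map
  return 0;
}
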
V8_DECLARE_ONCE(initialize_gc_once);
@@ -6093,6 +6139,7 @@ static void InitializeGCOnce() {
bool Heap::SetUp(bool create_heap_objects) {
#ifdef DEBUG
allocation_timeout_ = FLAG_gc_interval;
+ debug_utils_ = new HeapDebugUtils(this);
#endif
// Initialize heap spaces and initial maps and objects. Whenever something
@@ -6287,6 +6334,11 @@ void Heap::TearDown() {
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
+
+#ifdef DEBUG
+ delete debug_utils_;
+ debug_utils_ = NULL;
+#endif
}
@@ -6865,9 +6917,6 @@ GCTracer::GCTracer(Heap* heap,
allocated_since_last_gc_(0),
spent_in_mutator_(0),
promoted_objects_size_(0),
- nodes_died_in_new_space_(0),
- nodes_copied_in_new_space_(0),
- nodes_promoted_(0),
heap_(heap),
gc_reason_(gc_reason),
collector_reason_(collector_reason) {
@@ -7008,9 +7057,6 @@ GCTracer::~GCTracer() {
PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
PrintF("promoted=%" V8_PTR_PREFIX "d ", promoted_objects_size_);
- PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
- PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
- PrintF("nodes_promoted=%d ", nodes_promoted_);
if (collector_ == SCAVENGER) {
PrintF("stepscount=%d ", steps_count_since_last_gc_);
@@ -7018,7 +7064,6 @@ GCTracer::~GCTracer() {
} else {
PrintF("stepscount=%d ", steps_count_);
PrintF("stepstook=%d ", static_cast<int>(steps_took_));
- PrintF("longeststep=%.f ", longest_step_);
}
PrintF("\n");
@@ -7139,7 +7184,7 @@ void TranscendentalCache::Clear() {
void ExternalStringTable::CleanUp() {
int last = 0;
for (int i = 0; i < new_space_strings_.length(); ++i) {
- if (new_space_strings_[i] == heap_->the_hole_value()) {
+ if (new_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
continue;
}
if (heap_->InNewSpace(new_space_strings_[i])) {
@@ -7151,7 +7196,7 @@ void ExternalStringTable::CleanUp() {
new_space_strings_.Rewind(last);
last = 0;
for (int i = 0; i < old_space_strings_.length(); ++i) {
- if (old_space_strings_[i] == heap_->the_hole_value()) {
+ if (old_space_strings_[i] == heap_->raw_unchecked_the_hole_value()) {
continue;
}
ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
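
Both loops in CleanUp above use the same stable in-place compaction idiom: a
write cursor copies each survivor down and the list is then rewound to the
cursor. The idiom stripped of the V8 types, as a self-contained sketch:

#include <cassert>
#include <vector>

// Removes every element matching is_dead, preserving survivor order.
template <typename T, typename Pred>
static void CompactInPlace(std::vector<T>* list, Pred is_dead) {
  size_t last = 0;
  for (size_t i = 0; i < list->size(); ++i) {
    if (is_dead((*list)[i])) continue;  // skip holes, like the_hole_value
    (*list)[last++] = (*list)[i];       // shift the survivor down
  }
  list->resize(last);                   // the Rewind(last) step
}

int main() {
  std::vector<int> v = {1, -1, 2, -1, 3};
  CompactInPlace(&v, [](int x) { return x < 0; });
  assert((v == std::vector<int>{1, 2, 3}));
  return 0;
}
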
diff --git a/deps/v8/src/heap.h b/deps/v8/src/heap.h
index 72035cadc..0ab7ae0bd 100644
--- a/deps/v8/src/heap.h
+++ b/deps/v8/src/heap.h
@@ -154,9 +154,7 @@ namespace internal {
V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset) \
V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset) \
- V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset) \
- V(JSObject, observation_state, ObservationState) \
- V(Map, external_map, ExternalMap)
+ V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
#define ROOT_LIST(V) \
STRONG_ROOT_LIST(V) \
@@ -286,6 +284,14 @@ class StoreBufferRebuilder {
+// The all-static Heap class captures the interface to the global object heap.
+// All JavaScript contexts in this process share the same object heap.
+
+#ifdef DEBUG
+class HeapDebugUtils;
+#endif
+
+
// A queue of objects promoted during scavenge. Each object is accompanied
 // by its size to avoid dereferencing a map pointer for scanning.
class PromotionQueue {
@@ -481,9 +487,6 @@ class Heap {
// Returns the amount of executable memory currently committed for the heap.
intptr_t CommittedMemoryExecutable();
-  // Returns the amount of physical memory currently committed for the heap.
- size_t CommittedPhysicalMemory();
-
// Returns the available bytes in space w/o growing.
// Heap doesn't guarantee that it can allocate an object that requires
// all available bytes. Check MaxHeapObjectSize() instead.
@@ -576,7 +579,6 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateJSArrayWithElements(
FixedArrayBase* array_base,
ElementsKind elements_kind,
- int length,
PretenureFlag pretenure = NOT_TENURED);
// Allocates and initializes a new global object based on a constructor.
@@ -659,9 +661,6 @@ class Heap {
// Allocates a serialized scope info.
MUST_USE_RESULT MaybeObject* AllocateScopeInfo(int length);
- // Allocates an External object for v8's external API.
- MUST_USE_RESULT MaybeObject* AllocateExternal(void* value);
-
// Allocates an empty PolymorphicCodeCache.
MUST_USE_RESULT MaybeObject* AllocatePolymorphicCodeCache();
@@ -698,7 +697,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateStringFromOneByte(
+ MUST_USE_RESULT MaybeObject* AllocateStringFromAscii(
Vector<const char> str,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT inline MaybeObject* AllocateStringFromUtf8(
@@ -742,7 +741,7 @@ class Heap {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed.
// Please note this does not perform a garbage collection.
- MUST_USE_RESULT MaybeObject* AllocateRawOneByteString(
+ MUST_USE_RESULT MaybeObject* AllocateRawAsciiString(
int length,
PretenureFlag pretenure = NOT_TENURED);
MUST_USE_RESULT MaybeObject* AllocateRawTwoByteString(
@@ -1037,8 +1036,9 @@ class Heap {
return LookupSymbol(CStrVector(str));
}
MUST_USE_RESULT MaybeObject* LookupSymbol(String* str);
- MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(
- Handle<SeqOneByteString> string, int from, int length);
+ MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Handle<SeqAsciiString> string,
+ int from,
+ int length);
bool LookupSymbolIfExists(String* str, String** symbol);
bool LookupTwoCharsSymbolIfExists(String* str, String** symbol);
@@ -1448,10 +1448,6 @@ class Heap {
STATIC_CHECK(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
STATIC_CHECK(kempty_symbolRootIndex == Internals::kEmptySymbolRootIndex);
- // Generated code can embed direct references to non-writable roots if
- // they are in new space.
- static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
-
MUST_USE_RESULT MaybeObject* NumberToString(
Object* number, bool check_number_string_cache = true);
MUST_USE_RESULT MaybeObject* Uint32ToString(
@@ -1785,6 +1781,8 @@ class Heap {
// Do we expect to be able to handle allocation failure at this
// time?
bool disallow_allocation_failure_;
+
+ HeapDebugUtils* debug_utils_;
#endif // DEBUG
// Indicates that the new space should be kept small due to high promotion
@@ -1901,6 +1899,7 @@ class Heap {
bool PerformGarbageCollection(GarbageCollector collector,
GCTracer* tracer);
+
inline void UpdateOldSpaceLimits();
// Allocate an uninitialized object in map space. The behavior is identical
@@ -1927,9 +1926,9 @@ class Heap {
void CreateFixedStubs();
- MUST_USE_RESULT MaybeObject* CreateOddball(const char* to_string,
- Object* to_number,
- byte kind);
+ MaybeObject* CreateOddball(const char* to_string,
+ Object* to_number,
+ byte kind);
// Allocate a JSArray with no elements
MUST_USE_RESULT MaybeObject* AllocateJSArray(
@@ -2549,18 +2548,6 @@ class GCTracer BASE_EMBEDDED {
promoted_objects_size_ += object_size;
}
- void increment_nodes_died_in_new_space() {
- nodes_died_in_new_space_++;
- }
-
- void increment_nodes_copied_in_new_space() {
- nodes_copied_in_new_space_++;
- }
-
- void increment_nodes_promoted() {
- nodes_promoted_++;
- }
-
private:
// Returns a string matching the collector.
const char* CollectorString();
@@ -2605,15 +2592,6 @@ class GCTracer BASE_EMBEDDED {
// Size of objects promoted during the current collection.
intptr_t promoted_objects_size_;
- // Number of died nodes in the new space.
- int nodes_died_in_new_space_;
-
- // Number of copied nodes to the new space.
- int nodes_copied_in_new_space_;
-
- // Number of promoted nodes to the old space.
- int nodes_promoted_;
-
// Incremental marking steps counters.
int steps_count_;
double steps_took_;
diff --git a/deps/v8/src/hydrogen-instructions.cc b/deps/v8/src/hydrogen-instructions.cc
index c1245b2fb..939b4f497 100644
--- a/deps/v8/src/hydrogen-instructions.cc
+++ b/deps/v8/src/hydrogen-instructions.cc
@@ -85,81 +85,6 @@ void HValue::AssumeRepresentation(Representation r) {
}
-void HValue::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
-}
-
-
-Representation HValue::RepresentationFromUses() {
- if (HasNoUses()) return Representation::None();
-
- // Array of use counts for each representation.
- int use_count[Representation::kNumRepresentations] = { 0 };
-
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- HValue* use = it.value();
- Representation rep = use->observed_input_representation(it.index());
- if (rep.IsNone()) continue;
- if (FLAG_trace_representation) {
- PrintF("#%d %s is used by #%d %s as %s%s\n",
- id(), Mnemonic(), use->id(), use->Mnemonic(), rep.Mnemonic(),
- (use->CheckFlag(kTruncatingToInt32) ? "-trunc" : ""));
- }
- use_count[rep.kind()] += use->LoopWeight();
- }
- if (IsPhi()) HPhi::cast(this)->AddIndirectUsesTo(&use_count[0]);
- int tagged_count = use_count[Representation::kTagged];
- int double_count = use_count[Representation::kDouble];
- int int32_count = use_count[Representation::kInteger32];
-
- if (tagged_count > 0) return Representation::Tagged();
- if (double_count > 0) return Representation::Double();
- if (int32_count > 0) return Representation::Integer32();
-
- return Representation::None();
-}
-
-
-void HValue::UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason) {
- Representation r = representation();
- if (new_rep.is_more_general_than(r)) {
- // When an HConstant is marked "not convertible to integer", then
- // never try to represent it as an integer.
- if (new_rep.IsInteger32() && !IsConvertibleToInteger()) {
- new_rep = Representation::Tagged();
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s because it's NCTI"
- " (%s want i)\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- } else {
- if (FLAG_trace_representation) {
- PrintF("Changing #%d %s representation %s -> %s based on %s\n",
- id(), Mnemonic(), r.Mnemonic(), new_rep.Mnemonic(), reason);
- }
- }
- ChangeRepresentation(new_rep);
- AddDependantsToWorklist(h_infer);
- }
-}
-
-
-void HValue::AddDependantsToWorklist(HInferRepresentation* h_infer) {
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- h_infer->AddToWorklist(it.value());
- }
- for (int i = 0; i < OperandCount(); ++i) {
- h_infer->AddToWorklist(OperandAt(i));
- }
-}
-
-
static int32_t ConvertAndSetOverflow(int64_t result, bool* overflow) {
if (result > kMaxInt) {
*overflow = true;
@@ -376,7 +301,6 @@ HUseListNode* HUseListNode::tail() {
bool HValue::CheckUsesForFlag(Flag f) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- if (it.value()->IsSimulate()) continue;
if (!it.value()->CheckFlag(f)) return false;
}
return true;
@@ -801,13 +725,6 @@ void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
}
-void HWrapReceiver::PrintDataTo(StringStream* stream) {
- receiver()->PrintNameTo(stream);
- stream->Add(" ");
- function()->PrintNameTo(stream);
-}
-
-
void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
arguments()->PrintNameTo(stream);
stream->Add("[");
@@ -847,24 +764,6 @@ void HReturn::PrintDataTo(StringStream* stream) {
}
-Representation HBranch::observed_input_representation(int index) {
- static const ToBooleanStub::Types tagged_types(
- ToBooleanStub::UNDEFINED |
- ToBooleanStub::NULL_TYPE |
- ToBooleanStub::SPEC_OBJECT |
- ToBooleanStub::STRING);
- if (expected_input_types_.ContainsAnyOf(tagged_types)) {
- return Representation::Tagged();
- } else if (expected_input_types_.Contains(ToBooleanStub::HEAP_NUMBER)) {
- return Representation::Double();
- } else if (expected_input_types_.Contains(ToBooleanStub::SMI)) {
- return Representation::Integer32();
- } else {
- return Representation::None();
- }
-}
-
-
void HCompareMap::PrintDataTo(StringStream* stream) {
value()->PrintNameTo(stream);
stream->Add(" (%p)", *map());
@@ -960,6 +859,16 @@ void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
}
+HValue* HConstant::Canonicalize() {
+ return HasNoUses() ? NULL : this;
+}
+
+
+HValue* HTypeof::Canonicalize() {
+ return HasNoUses() ? NULL : this;
+}
+
+
HValue* HBitwise::Canonicalize() {
if (!representation().IsInteger32()) return this;
// If x is an int32, then x & -1 == x, x | 0 == x and x ^ 0 == x.
@@ -1440,11 +1349,15 @@ void HPhi::InitRealUses(int phi_id) {
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* value = it.value();
if (!value->IsPhi()) {
- Representation rep = value->observed_input_representation(it.index());
+ Representation rep = value->ObservedInputRepresentation(it.index());
non_phi_uses_[rep.kind()] += value->LoopWeight();
if (FLAG_trace_representation) {
- PrintF("#%d Phi is used by real #%d %s as %s\n",
- id(), value->id(), value->Mnemonic(), rep.Mnemonic());
+ PrintF("%d %s is used by %d %s as %s\n",
+ this->id(),
+ this->Mnemonic(),
+ value->id(),
+ value->Mnemonic(),
+ rep.Mnemonic());
}
}
}
@@ -1453,8 +1366,11 @@ void HPhi::InitRealUses(int phi_id) {
void HPhi::AddNonPhiUsesFrom(HPhi* other) {
if (FLAG_trace_representation) {
- PrintF("adding to #%d Phi uses of #%d Phi: i%d d%d t%d\n",
- id(), other->id(),
+ PrintF("adding to %d %s uses of %d %s: i%d d%d t%d\n",
+ this->id(),
+ this->Mnemonic(),
+ other->id(),
+ other->Mnemonic(),
other->non_phi_uses_[Representation::kInteger32],
other->non_phi_uses_[Representation::kDouble],
other->non_phi_uses_[Representation::kTagged]);
@@ -1473,20 +1389,9 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
-void HSimulate::MergeInto(HSimulate* other) {
- for (int i = 0; i < values_.length(); ++i) {
- HValue* value = values_[i];
- if (HasAssignedIndexAt(i)) {
- other->AddAssignedValue(GetAssignedIndexAt(i), value);
- } else {
- if (other->pop_count_ > 0) {
- other->pop_count_--;
- } else {
- other->AddPushedValue(value);
- }
- }
- }
- other->pop_count_ += pop_count();
+void HPhi::ResetInteger32Uses() {
+ non_phi_uses_[Representation::kInteger32] = 0;
+ indirect_uses_[Representation::kInteger32] = 0;
}
@@ -1495,7 +1400,7 @@ void HSimulate::PrintDataTo(StringStream* stream) {
if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
if (values_.length() > 0) {
if (pop_count_ > 0) stream->Add(" /");
- for (int i = values_.length() - 1; i >= 0; --i) {
+ for (int i = 0; i < values_.length(); ++i) {
if (i > 0) stream->Add(",");
if (HasAssignedIndexAt(i)) {
stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
@@ -1534,6 +1439,7 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
has_int32_value_(false),
has_double_value_(false) {
+ set_representation(r);
SetFlag(kUseGVN);
if (handle_->IsNumber()) {
double n = handle_->Number();
@@ -1542,16 +1448,6 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
double_value_ = n;
has_double_value_ = true;
}
- if (r.IsNone()) {
- if (has_int32_value_) {
- r = Representation::Integer32();
- } else if (has_double_value_) {
- r = Representation::Double();
- } else {
- r = Representation::Tagged();
- }
- }
- set_representation(r);
}
@@ -1650,60 +1546,6 @@ void HBinaryOperation::PrintDataTo(StringStream* stream) {
}
-void HBinaryOperation::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- // When the operation has information about its own output type, don't look
- // at uses.
- if (!observed_output_representation_.IsNone()) return;
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
-}
-
-
-Representation HBinaryOperation::RepresentationFromInputs() {
- // Determine the worst case of observed input representations and
- // the currently assumed output representation.
- Representation rep = representation();
- if (observed_output_representation_.is_more_general_than(rep)) {
- rep = observed_output_representation_;
- }
- for (int i = 1; i <= 2; ++i) {
- Representation input_rep = observed_input_representation(i);
- if (input_rep.is_more_general_than(rep)) rep = input_rep;
- }
- // If any of the actual input representation is more general than what we
- // have so far but not Tagged, use that representation instead.
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
-
- if (left_rep.is_more_general_than(rep) &&
- left()->CheckFlag(kFlexibleRepresentation)) {
- rep = left_rep;
- }
- if (right_rep.is_more_general_than(rep) &&
- right()->CheckFlag(kFlexibleRepresentation)) {
- rep = right_rep;
- }
- return rep;
-}
-
-
-void HBinaryOperation::AssumeRepresentation(Representation r) {
- set_observed_input_representation(r, r);
- HValue::AssumeRepresentation(r);
-}
-
-
-void HMathMinMax::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- Representation new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- // Do not care about uses.
-}
-
-
Range* HBitwise::InferRange(Zone* zone) {
if (op() == Token::BIT_XOR) return HValue::InferRange(zone);
const int32_t kDefaultMask = static_cast<int32_t>(0xffffffff);
@@ -1780,7 +1622,7 @@ Range* HShl::InferRange(Zone* zone) {
}
-Range* HLoadKeyed::InferRange(Zone* zone) {
+Range* HLoadKeyedSpecializedArrayElement::InferRange(Zone* zone) {
switch (elements_kind()) {
case EXTERNAL_PIXEL_ELEMENTS:
return new(zone) Range(0, 255);
@@ -1835,19 +1677,9 @@ void HGoto::PrintDataTo(StringStream* stream) {
}
-void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
- Representation rep = Representation::None();
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- bool observed_integers =
- observed_input_representation(0).IsInteger32() &&
- observed_input_representation(1).IsInteger32();
- bool inputs_are_not_doubles =
- !left_rep.IsDouble() && !right_rep.IsDouble();
- if (observed_integers && inputs_are_not_doubles) {
- rep = Representation::Integer32();
- } else {
- rep = Representation::Double();
+void HCompareIDAndBranch::SetInputRepresentation(Representation r) {
+ input_representation_ = r;
+ if (r.IsDouble()) {
// According to the ES5 spec (11.9.3, 11.8.5), Equality comparisons (==, ===
// and !=) have special handling of undefined, e.g. undefined == undefined
// is 'true'. Relational comparisons have a different semantic, first
@@ -1864,8 +1696,9 @@ void HCompareIDAndBranch::InferRepresentation(HInferRepresentation* h_infer) {
if (!Token::IsOrderedRelationalCompareOp(token_)) {
SetFlag(kDeoptimizeOnUndefined);
}
+ } else {
+ ASSERT(r.IsInteger32());
}
- ChangeRepresentation(rep);
}
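
The deopt flag above is set only for equality tokens because, on the double
path, undefined has been converted to NaN: ordered comparisons then agree
with the JavaScript spec (anything compared with undefined is false), while
NaN equality would wrongly report undefined == undefined as false. A
self-contained illustration:

#include <cassert>
#include <cmath>

int main() {
  double undef = std::nan("");  // what undefined becomes on the double path
  // Ordered comparisons: IEEE NaN rules and the JS rules agree (all false),
  // so <, <=, > and >= need no special handling for undefined.
  assert(!(undef < undef));
  assert(!(undef <= undef));
  // Equality: NaN != NaN, but JS requires undefined == undefined to be
  // true, hence kDeoptimizeOnUndefined for ==, === and !=.
  assert(!(undef == undef));
  return 0;
}
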
@@ -2016,25 +1849,11 @@ void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void HLoadKeyed::PrintDataTo(StringStream* stream) {
- if (!is_external()) {
- elements()->PrintNameTo(stream);
- } else {
- ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- elements()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(ElementsKindToString(elements_kind()));
- }
-
+void HLoadKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- if (IsDehoisted()) {
- stream->Add(" + %d] ", index_offset());
- } else {
- stream->Add("] ");
- }
-
+ stream->Add("] ");
dependency()->PrintNameTo(stream);
if (RequiresHoleCheck()) {
stream->Add(" check_hole");
@@ -2042,26 +1861,29 @@ void HLoadKeyed::PrintDataTo(StringStream* stream) {
}
-bool HLoadKeyed::RequiresHoleCheck() const {
+bool HLoadKeyedFastElement::RequiresHoleCheck() const {
if (IsFastPackedElementsKind(elements_kind())) {
return false;
}
- if (IsFastDoubleElementsKind(elements_kind())) {
- return true;
- }
-
for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
HValue* use = it.value();
- if (!use->IsChange()) {
- return true;
- }
+ if (!use->IsChange()) return true;
}
return false;
}
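
A compact model of the predicate above: packed element kinds cannot contain
holes by construction, and a holey load whose every use is an HChange can
leave hole handling to the conversion. The names below are illustrative, not
V8's:

#include <cassert>

enum Kind { PACKED, HOLEY };

static bool RequiresHoleCheck(Kind kind, bool every_use_is_a_change) {
  if (kind == PACKED) return false;  // a packed backing store has no holes
  return !every_use_is_a_change;     // HChange-style uses absorb the hole
}

int main() {
  assert(!RequiresHoleCheck(PACKED, false));
  assert(RequiresHoleCheck(HOLEY, false));
  assert(!RequiresHoleCheck(HOLEY, true));
  return 0;
}
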
+void HLoadKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
+}
+
+
void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add("[");
@@ -2074,22 +1896,21 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
// Recognize generic keyed loads that use property name generated
// by for-in statement as a key and rewrite them into fast property load
// by index.
- if (key()->IsLoadKeyed()) {
- HLoadKeyed* key_load = HLoadKeyed::cast(key());
- if (key_load->elements()->IsForInCacheArray()) {
+ if (key()->IsLoadKeyedFastElement()) {
+ HLoadKeyedFastElement* key_load = HLoadKeyedFastElement::cast(key());
+ if (key_load->object()->IsForInCacheArray()) {
HForInCacheArray* names_cache =
- HForInCacheArray::cast(key_load->elements());
+ HForInCacheArray::cast(key_load->object());
if (names_cache->enumerable() == object()) {
HForInCacheArray* index_cache =
names_cache->index_cache();
HCheckMapValue* map_check =
new(block()->zone()) HCheckMapValue(object(), names_cache->map());
- HInstruction* index = new(block()->zone()) HLoadKeyed(
+ HInstruction* index = new(block()->zone()) HLoadKeyedFastElement(
index_cache,
key_load->key(),
- key_load->key(),
- key_load->elements_kind());
+ key_load->key());
map_check->InsertBefore(this);
index->InsertBefore(this);
HLoadFieldByIndex* load = new(block()->zone()) HLoadFieldByIndex(
@@ -2104,6 +1925,56 @@ HValue* HLoadKeyedGeneric::Canonicalize() {
}
+void HLoadKeyedSpecializedArrayElement::PrintDataTo(
+ StringStream* stream) {
+ external_pointer()->PrintNameTo(stream);
+ stream->Add(".");
+ switch (elements_kind()) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ stream->Add("byte");
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ stream->Add("u_byte");
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ stream->Add("short");
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ stream->Add("u_short");
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ stream->Add("int");
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ stream->Add("u_int");
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ stream->Add("float");
+ break;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ stream->Add("double");
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ stream->Add("pixel");
+ break;
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] ");
+ dependency()->PrintNameTo(stream);
+}
+
+
void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
stream->Add(".");
@@ -2130,25 +2001,20 @@ void HStoreNamedField::PrintDataTo(StringStream* stream) {
}
-void HStoreKeyed::PrintDataTo(StringStream* stream) {
- if (!is_external()) {
- elements()->PrintNameTo(stream);
- } else {
- elements()->PrintNameTo(stream);
- stream->Add(".");
- stream->Add(ElementsKindToString(elements_kind()));
- ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
- elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
- }
-
+void HStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintNameTo(stream);
stream->Add("[");
key()->PrintNameTo(stream);
- if (IsDehoisted()) {
- stream->Add(" + %d] = ", index_offset());
- } else {
- stream->Add("] = ");
- }
+ stream->Add("] = ");
+ value()->PrintNameTo(stream);
+}
+
+void HStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
+ elements()->PrintNameTo(stream);
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] = ");
value()->PrintNameTo(stream);
}
@@ -2162,6 +2028,56 @@ void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
}
+void HStoreKeyedSpecializedArrayElement::PrintDataTo(
+ StringStream* stream) {
+ external_pointer()->PrintNameTo(stream);
+ stream->Add(".");
+ switch (elements_kind()) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ stream->Add("byte");
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ stream->Add("u_byte");
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ stream->Add("short");
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ stream->Add("u_short");
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ stream->Add("int");
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ stream->Add("u_int");
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ stream->Add("float");
+ break;
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ stream->Add("double");
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ stream->Add("pixel");
+ break;
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ stream->Add("[");
+ key()->PrintNameTo(stream);
+ stream->Add("] = ");
+ value()->PrintNameTo(stream);
+}
+
+
void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
object()->PrintNameTo(stream);
ElementsKind from_kind = original_map()->elements_kind();
@@ -2452,10 +2368,10 @@ HValue* HAdd::EnsureAndPropagateNotMinusZero(BitVector* visited) {
}
-bool HStoreKeyed::NeedsCanonicalization() {
- // If value is an integer or comes from the result of a keyed load
- // then it will be a non-hole value: no need for canonicalization.
- if (value()->IsLoadKeyed() ||
+bool HStoreKeyedFastDoubleElement::NeedsCanonicalization() {
+  // If value was loaded from an unboxed double backing store or was
+  // converted from an integer, then we don't have to canonicalize it.
+ if (value()->IsLoadKeyedFastDoubleElement() ||
(value()->IsChange() && HChange::cast(value())->from().IsInteger32())) {
return false;
}
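
The canonicalization this predicate guards exists because an unboxed double
array reserves one special NaN bit pattern for "the hole", so an arbitrary
computed NaN has to be collapsed to the canonical NaN before being stored;
values that already came out of a double load or an int32 conversion are
known to be canonical. A self-contained sketch of the collapse, assuming the
usual IEEE-754 quiet-NaN encoding (the odd payload below is illustrative):

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>
#include <limits>

static uint64_t Bits(double d) {
  uint64_t b;
  std::memcpy(&b, &d, sizeof b);
  return b;
}

static double Canonicalize(double d) {
  // Collapse every NaN payload onto the single canonical quiet NaN.
  return std::isnan(d) ? std::numeric_limits<double>::quiet_NaN() : d;
}

int main() {
  uint64_t odd_payload = 0x7FF8DEADBEEF0000ull;  // a non-canonical quiet NaN
  double odd_nan;
  std::memcpy(&odd_nan, &odd_payload, sizeof odd_nan);
  assert(std::isnan(odd_nan));
  assert(Bits(Canonicalize(odd_nan)) != Bits(odd_nan));  // payload rewritten
  assert(Canonicalize(1.5) == 1.5);                      // non-NaN untouched
  return 0;
}
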
@@ -2638,41 +2554,7 @@ void HBitwise::PrintDataTo(StringStream* stream) {
}
-void HPhi::InferRepresentation(HInferRepresentation* h_infer) {
- ASSERT(CheckFlag(kFlexibleRepresentation));
- // If there are non-Phi uses, and all of them have observed the same
- // representation, than that's what this Phi is going to use.
- Representation new_rep = RepresentationObservedByAllNonPhiUses();
- if (!new_rep.IsNone()) {
- UpdateRepresentation(new_rep, h_infer, "unanimous use observations");
- return;
- }
- new_rep = RepresentationFromInputs();
- UpdateRepresentation(new_rep, h_infer, "inputs");
- new_rep = RepresentationFromUses();
- UpdateRepresentation(new_rep, h_infer, "uses");
- new_rep = RepresentationFromUseRequirements();
- UpdateRepresentation(new_rep, h_infer, "use requirements");
-}
-
-
-Representation HPhi::RepresentationObservedByAllNonPhiUses() {
- int non_phi_use_count = 0;
- for (int i = Representation::kInteger32;
- i < Representation::kNumRepresentations; ++i) {
- non_phi_use_count += non_phi_uses_[i];
- }
- if (non_phi_use_count <= 1) return Representation::None();
- for (int i = 0; i < Representation::kNumRepresentations; ++i) {
- if (non_phi_uses_[i] == non_phi_use_count) {
- return Representation::FromKind(static_cast<Representation::Kind>(i));
- }
- }
- return Representation::None();
-}
-
-
-Representation HPhi::RepresentationFromInputs() {
+Representation HPhi::InferredRepresentation() {
bool double_occurred = false;
bool int32_occurred = false;
for (int i = 0; i < OperandCount(); ++i) {
@@ -2681,7 +2563,6 @@ Representation HPhi::RepresentationFromInputs() {
HPhi* hint_value = HUnknownOSRValue::cast(value)->incoming_value();
if (hint_value != NULL) {
Representation hint = hint_value->representation();
- if (hint.IsTagged()) return hint;
if (hint.IsDouble()) double_occurred = true;
if (hint.IsInteger32()) int32_occurred = true;
}
@@ -2700,9 +2581,7 @@ Representation HPhi::RepresentationFromInputs() {
return Representation::Tagged();
}
} else {
- if (value->IsPhi() && !IsConvertibleToInteger()) {
- return Representation::Tagged();
- }
+ return Representation::Tagged();
}
}
}
@@ -2715,37 +2594,6 @@ Representation HPhi::RepresentationFromInputs() {
}
-Representation HPhi::RepresentationFromUseRequirements() {
- Representation all_uses_require = Representation::None();
- bool all_uses_require_the_same = true;
- for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
- // We check for observed_input_representation elsewhere.
- Representation use_rep =
- it.value()->RequiredInputRepresentation(it.index());
- // No useful info from this use -> look at the next one.
- if (use_rep.IsNone()) {
- continue;
- }
- if (use_rep.Equals(all_uses_require)) {
- continue;
- }
- // This use's representation contradicts what we've seen so far.
- if (!all_uses_require.IsNone()) {
- ASSERT(!use_rep.Equals(all_uses_require));
- all_uses_require_the_same = false;
- break;
- }
- // Otherwise, initialize observed representation.
- all_uses_require = use_rep;
- }
- if (all_uses_require_the_same) {
- return all_uses_require;
- }
-
- return Representation::None();
-}
-
-
// Node-specific verification code is only included in debug mode.
#ifdef DEBUG
@@ -2784,6 +2632,12 @@ void HCheckFunction::Verify() {
ASSERT(HasNoUses());
}
+
+void HCheckPrototypeMaps::Verify() {
+ HInstruction::Verify();
+ ASSERT(HasNoUses());
+}
+
#endif
} } // namespace v8::internal
diff --git a/deps/v8/src/hydrogen-instructions.h b/deps/v8/src/hydrogen-instructions.h
index 7225791cd..9e6344cd5 100644
--- a/deps/v8/src/hydrogen-instructions.h
+++ b/deps/v8/src/hydrogen-instructions.h
@@ -45,7 +45,6 @@ namespace internal {
// Forward declarations.
class HBasicBlock;
class HEnvironment;
-class HInferRepresentation;
class HInstruction;
class HLoopInformation;
class HValue;
@@ -134,8 +133,10 @@ class LChunkBuilder;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
@@ -153,9 +154,7 @@ class LChunkBuilder;
V(Random) \
V(RegExpLiteral) \
V(Return) \
- V(Ror) \
V(Sar) \
- V(SeqStringSetChar) \
V(Shl) \
V(Shr) \
V(Simulate) \
@@ -164,8 +163,10 @@ class LChunkBuilder;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -310,9 +311,9 @@ class Representation {
public:
enum Kind {
kNone,
- kInteger32,
- kDouble,
kTagged,
+ kDouble,
+ kInteger32,
kExternal,
kNumRepresentations
};
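
This enum reorder is load-bearing: the 3.15 code being removed compares raw
kind values in is_more_general_than() (deleted a few lines below), so its
order None < Integer32 < Double < Tagged encodes the representation lattice
with Tagged as the most general kind. A minimal model of that encoding:

#include <cassert>

enum Kind { kNone, kInteger32, kDouble, kTagged };  // the 3.15 ordering

static bool IsMoreGeneralThan(Kind a, Kind b) { return a > b; }

int main() {
  assert(IsMoreGeneralThan(kTagged, kDouble));     // tagged can hold anything
  assert(IsMoreGeneralThan(kDouble, kInteger32));  // every int32 fits a double
  assert(!IsMoreGeneralThan(kNone, kInteger32));   // none is the bottom
  return 0;
}
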
@@ -325,18 +326,10 @@ class Representation {
static Representation Double() { return Representation(kDouble); }
static Representation External() { return Representation(kExternal); }
- static Representation FromKind(Kind kind) { return Representation(kind); }
-
bool Equals(const Representation& other) {
return kind_ == other.kind_;
}
- bool is_more_general_than(const Representation& other) {
- ASSERT(kind_ != kExternal);
- ASSERT(other.kind_ != kExternal);
- return kind_ > other.kind_;
- }
-
Kind kind() const { return static_cast<Kind>(kind_); }
bool IsNone() const { return kind_ == kNone; }
bool IsTagged() const { return kind_ == kTagged; }
@@ -639,15 +632,13 @@ class HValue: public ZoneObject {
virtual bool EmitAtUses() { return false; }
Representation representation() const { return representation_; }
void ChangeRepresentation(Representation r) {
+ // Representation was already set and is allowed to be changed.
+ ASSERT(!r.IsNone());
ASSERT(CheckFlag(kFlexibleRepresentation));
RepresentationChanged(r);
representation_ = r;
- if (r.IsTagged()) {
- // Tagged is the bottom of the lattice, don't go any further.
- ClearFlag(kFlexibleRepresentation);
- }
}
- virtual void AssumeRepresentation(Representation r);
+ void AssumeRepresentation(Representation r);
virtual bool IsConvertibleToInteger() const { return true; }
@@ -745,11 +736,16 @@ class HValue: public ZoneObject {
void ComputeInitialRange(Zone* zone);
// Representation helpers.
- virtual Representation observed_input_representation(int index) {
- return Representation::None();
- }
virtual Representation RequiredInputRepresentation(int index) = 0;
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+
+ virtual Representation InferredRepresentation() {
+ return representation();
+ }
+
+ // Type feedback access.
+ virtual Representation ObservedInputRepresentation(int index) {
+ return RequiredInputRepresentation(index);
+ }
// This gives the instruction an opportunity to replace itself with an
// instruction that does the same in some better way. To replace an
@@ -797,18 +793,7 @@ class HValue: public ZoneObject {
UNREACHABLE();
return false;
}
-
- virtual Representation RepresentationFromInputs() {
- return representation();
- }
- Representation RepresentationFromUses();
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason);
- void AddDependantsToWorklist(HInferRepresentation* h_infer);
-
virtual void RepresentationChanged(Representation to) { }
-
virtual Range* InferRange(Zone* zone);
virtual void DeleteFromGraph() = 0;
virtual void InternalSetOperandAt(int index, HValue* value) = 0;
@@ -818,6 +803,7 @@ class HValue: public ZoneObject {
}
void set_representation(Representation r) {
+ // Representation is set-once.
ASSERT(representation_.IsNone() && !r.IsNone());
representation_ = r;
}
@@ -1130,7 +1116,6 @@ class HBranch: public HUnaryControlInstruction {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::None();
}
- virtual Representation observed_input_representation(int index);
ToBooleanStub::Types expected_input_types() const {
return expected_input_types_;
@@ -1335,24 +1320,14 @@ class HClampToUint8: public HUnaryOperation {
};
-enum RemovableSimulate {
- REMOVABLE_SIMULATE,
- FIXED_SIMULATE
-};
-
-
class HSimulate: public HInstruction {
public:
- HSimulate(BailoutId ast_id,
- int pop_count,
- Zone* zone,
- RemovableSimulate removable)
+ HSimulate(BailoutId ast_id, int pop_count, Zone* zone)
: ast_id_(ast_id),
pop_count_(pop_count),
values_(2, zone),
assigned_indexes_(2, zone),
- zone_(zone),
- removable_(removable) {}
+ zone_(zone) {}
virtual ~HSimulate() {}
virtual void PrintDataTo(StringStream* stream);
@@ -1386,9 +1361,6 @@ class HSimulate: public HInstruction {
return Representation::None();
}
- void MergeInto(HSimulate* other);
- bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
-
DECLARE_CONCRETE_INSTRUCTION(Simulate)
#ifdef DEBUG
@@ -1415,7 +1387,6 @@ class HSimulate: public HInstruction {
ZoneList<HValue*> values_;
ZoneList<int> assigned_indexes_;
Zone* zone_;
- RemovableSimulate removable_;
};
@@ -2042,9 +2013,6 @@ class HBitNot: public HUnaryOperation {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Integer32();
}
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
virtual HType CalculateInferredType();
virtual HValue* Canonicalize();
@@ -2072,7 +2040,7 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
set_representation(Representation::Integer32());
break;
case kMathAbs:
- // Not setting representation here: it is None intentionally.
+ set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
SetGVNFlag(kChangesNewSpacePromotion);
break;
@@ -2085,9 +2053,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
set_representation(Representation::Double());
SetGVNFlag(kChangesNewSpacePromotion);
break;
- case kMathExp:
- set_representation(Representation::Double());
- break;
default:
UNREACHABLE();
}
@@ -2114,7 +2079,6 @@ class HUnaryMathOperation: public HTemplateInstruction<2> {
case kMathSqrt:
case kMathPowHalf:
case kMathLog:
- case kMathExp:
case kMathSin:
case kMathCos:
case kMathTan:
@@ -2256,7 +2220,6 @@ class HCheckMaps: public HTemplateInstruction<2> {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
-
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
@@ -2417,6 +2380,10 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
SetGVNFlag(kDependsOnMaps);
}
+#ifdef DEBUG
+ virtual void Verify();
+#endif
+
Handle<JSObject> prototype() const { return prototype_; }
Handle<JSObject> holder() const { return holder_; }
@@ -2484,15 +2451,13 @@ class HPhi: public HValue {
indirect_uses_[i] = 0;
}
ASSERT(merged_index >= 0);
+ set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
}
- virtual Representation RepresentationFromInputs();
+ virtual Representation InferredRepresentation();
virtual Range* InferRange(Zone* zone);
- virtual void InferRepresentation(HInferRepresentation* h_infer);
- Representation RepresentationObservedByAllNonPhiUses();
- Representation RepresentationFromUseRequirements();
virtual Representation RequiredInputRepresentation(int index) {
return representation();
}
@@ -2556,17 +2521,14 @@ class HPhi: public HValue {
bool AllOperandsConvertibleToInteger() {
for (int i = 0; i < OperandCount(); ++i) {
if (!OperandAt(i)->IsConvertibleToInteger()) {
- if (FLAG_trace_representation) {
- HValue* input = OperandAt(i);
- PrintF("#%d %s: Input #%d %s at %d is NCTI\n",
- id(), Mnemonic(), input->id(), input->Mnemonic(), i);
- }
return false;
}
}
return true;
}
+ void ResetInteger32Uses();
+
protected:
virtual void DeleteFromGraph();
virtual void InternalSetOperandAt(int index, HValue* value) {
@@ -2655,6 +2617,7 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
+ virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() { return handle()->IsSmi(); }
@@ -2749,14 +2712,11 @@ class HConstant: public HTemplateInstruction<0> {
class HBinaryOperation: public HTemplateInstruction<3> {
public:
- HBinaryOperation(HValue* context, HValue* left, HValue* right)
- : observed_output_representation_(Representation::None()) {
+ HBinaryOperation(HValue* context, HValue* left, HValue* right) {
ASSERT(left != NULL && right != NULL);
SetOperandAt(0, context);
SetOperandAt(1, left);
SetOperandAt(2, right);
- observed_input_representation_[0] = Representation::None();
- observed_input_representation_[1] = Representation::None();
}
HValue* context() { return OperandAt(0); }
@@ -2775,34 +2735,11 @@ class HBinaryOperation: public HTemplateInstruction<3> {
return right();
}
- void set_observed_input_representation(Representation left,
- Representation right) {
- observed_input_representation_[0] = left;
- observed_input_representation_[1] = right;
- }
-
- virtual void initialize_output_representation(Representation observed) {
- observed_output_representation_ = observed;
- }
-
- virtual Representation observed_input_representation(int index) {
- if (index == 0) return Representation::Tagged();
- return observed_input_representation_[index - 1];
- }
-
- virtual void InferRepresentation(HInferRepresentation* h_infer);
- virtual Representation RepresentationFromInputs();
- virtual void AssumeRepresentation(Representation r);
-
virtual bool IsCommutative() const { return false; }
virtual void PrintDataTo(StringStream* stream);
DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
-
- private:
- Representation observed_input_representation_[2];
- Representation observed_output_representation_;
};
@@ -2823,8 +2760,6 @@ class HWrapReceiver: public HTemplateInstruction<2> {
virtual HValue* Canonicalize();
- virtual void PrintDataTo(StringStream* stream);
-
DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
};
@@ -2974,9 +2909,6 @@ class HBoundsCheck: public HTemplateInstruction<2> {
}
return Representation::Integer32();
}
- virtual Representation observed_input_representation(int index) {
- return Representation::Integer32();
- }
virtual void PrintDataTo(StringStream* stream);
@@ -2995,9 +2927,12 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
public:
HBitwiseBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
+ set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
- SetFlag(kTruncatingToInt32);
SetAllSideEffects();
+ observed_input_representation_[0] = Representation::Tagged();
+ observed_input_representation_[1] = Representation::None();
+ observed_input_representation_[2] = Representation::None();
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -3010,32 +2945,28 @@ class HBitwiseBinaryOperation: public HBinaryOperation {
if (!to.IsTagged()) {
ASSERT(to.IsInteger32());
ClearAllSideEffects();
+ SetFlag(kTruncatingToInt32);
SetFlag(kUseGVN);
- } else {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
}
}
- virtual void UpdateRepresentation(Representation new_rep,
- HInferRepresentation* h_infer,
- const char* reason) {
- // We only generate either int32 or generic tagged bitwise operations.
- if (new_rep.IsDouble()) new_rep = Representation::Integer32();
- HValue::UpdateRepresentation(new_rep, h_infer, reason);
- }
+ virtual HType CalculateInferredType();
- virtual void initialize_output_representation(Representation observed) {
- if (observed.IsDouble()) observed = Representation::Integer32();
- HBinaryOperation::initialize_output_representation(observed);
+ virtual Representation ObservedInputRepresentation(int index) {
+ return observed_input_representation_[index];
}
- virtual HType CalculateInferredType();
+ void InitializeObservedInputRepresentation(Representation r) {
+ observed_input_representation_[1] = r;
+ observed_input_representation_[2] = r;
+ }
DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
private:
virtual bool IsDeletable() const { return true; }
+
+ Representation observed_input_representation_[3];
};
@@ -3068,15 +2999,13 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
public:
HArithmeticBinaryOperation(HValue* context, HValue* left, HValue* right)
: HBinaryOperation(context, left, right) {
- SetAllSideEffects();
+ set_representation(Representation::Tagged());
SetFlag(kFlexibleRepresentation);
+ SetAllSideEffects();
}
virtual void RepresentationChanged(Representation to) {
- if (to.IsTagged()) {
- SetAllSideEffects();
- ClearFlag(kUseGVN);
- } else {
+ if (!to.IsTagged()) {
ClearAllSideEffects();
SetFlag(kUseGVN);
}
@@ -3089,6 +3018,13 @@ class HArithmeticBinaryOperation: public HBinaryOperation {
: representation();
}
+ virtual Representation InferredRepresentation() {
+ if (left()->representation().Equals(right()->representation())) {
+ return left()->representation();
+ }
+ return HValue::InferredRepresentation();
+ }
+
private:
virtual bool IsDeletable() const { return true; }
};
@@ -3107,9 +3043,11 @@ class HCompareGeneric: public HBinaryOperation {
}
virtual Representation RequiredInputRepresentation(int index) {
- return index == 0
- ? Representation::Tagged()
- : representation();
+ return Representation::Tagged();
+ }
+
+ Representation GetInputRepresentation() const {
+ return Representation::Tagged();
}
Token::Value token() const { return token_; }
@@ -3128,7 +3066,6 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
public:
HCompareIDAndBranch(HValue* left, HValue* right, Token::Value token)
: token_(token) {
- SetFlag(kFlexibleRepresentation);
ASSERT(Token::IsCompareOp(token));
SetOperandAt(0, left);
SetOperandAt(1, right);
@@ -3138,26 +3075,20 @@ class HCompareIDAndBranch: public HTemplateControlInstruction<2, 2> {
HValue* right() { return OperandAt(1); }
Token::Value token() const { return token_; }
- void set_observed_input_representation(Representation left,
- Representation right) {
- observed_input_representation_[0] = left;
- observed_input_representation_[1] = right;
+ void SetInputRepresentation(Representation r);
+ Representation GetInputRepresentation() const {
+ return input_representation_;
}
- virtual void InferRepresentation(HInferRepresentation* h_infer);
-
virtual Representation RequiredInputRepresentation(int index) {
- return representation();
- }
- virtual Representation observed_input_representation(int index) {
- return observed_input_representation_[index];
+ return input_representation_;
}
virtual void PrintDataTo(StringStream* stream);
DECLARE_CONCRETE_INSTRUCTION(CompareIDAndBranch)
private:
- Representation observed_input_representation_[2];
+ Representation input_representation_;
Token::Value token_;
};
@@ -3218,9 +3149,6 @@ class HIsNilAndBranch: public HUnaryControlInstruction {
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
- virtual Representation observed_input_representation(int index) {
- return Representation::Tagged();
- }
DECLARE_CONCRETE_INSTRUCTION(IsNilAndBranch)
@@ -3498,9 +3426,6 @@ class HPower: public HTemplateInstruction<2> {
? Representation::Double()
: Representation::None();
}
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
DECLARE_CONCRETE_INSTRUCTION(Power)
@@ -3661,16 +3586,6 @@ class HDiv: public HArithmeticBinaryOperation {
SetFlag(kCanOverflow);
}
- bool HasPowerOf2Divisor() {
- if (right()->IsConstant() &&
- HConstant::cast(right())->HasInteger32Value()) {
- int32_t value = HConstant::cast(right())->Integer32Value();
- return value != 0 && (IsPowerOf2(value) || IsPowerOf2(-value));
- }
-
- return false;
- }
-
virtual HValue* EnsureAndPropagateNotMinusZero(BitVector* visited);
static HInstruction* NewHDiv(Zone* zone,
@@ -3696,21 +3611,14 @@ class HMathMinMax: public HArithmeticBinaryOperation {
operation_(op) { }
virtual Representation RequiredInputRepresentation(int index) {
- return index == 0 ? Representation::Tagged()
- : representation();
- }
-
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
-
- virtual void InferRepresentation(HInferRepresentation* h_infer);
+ return index == 0
+ ? Representation::Tagged()
+ : representation();
+ }
- virtual Representation RepresentationFromInputs() {
- Representation left_rep = left()->representation();
- Representation right_rep = right()->representation();
- if ((left_rep.IsNone() || left_rep.IsInteger32()) &&
- (right_rep.IsNone() || right_rep.IsInteger32())) {
+ virtual Representation InferredRepresentation() {
+ if (left()->representation().IsInteger32() &&
+ right()->representation().IsInteger32()) {
return Representation::Integer32();
}
return Representation::Double();
@@ -3829,25 +3737,6 @@ class HSar: public HBitwiseBinaryOperation {
};
-class HRor: public HBitwiseBinaryOperation {
- public:
- HRor(HValue* context, HValue* left, HValue* right)
- : HBitwiseBinaryOperation(context, left, right) {
- ChangeRepresentation(Representation::Integer32());
- }
-
- static HInstruction* NewHRor(Zone* zone,
- HValue* context,
- HValue* left,
- HValue* right);
-
- DECLARE_CONCRETE_INSTRUCTION(Ror)
-
- protected:
- virtual bool DataEquals(HValue* other) { return true; }
-};
-
-
class HOsrEntry: public HTemplateInstruction<0> {
public:
explicit HOsrEntry(BailoutId ast_id) : ast_id_(ast_id) {
@@ -4028,8 +3917,7 @@ inline bool StoringValueNeedsWriteBarrier(HValue* value) {
inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
HValue* new_space_dominator) {
- return (!object->IsAllocateObject() && !object->IsFastLiteral()) ||
- (object != new_space_dominator);
+ return !object->IsAllocateObject() || (object != new_space_dominator);
}
@@ -4355,59 +4243,29 @@ class ArrayInstructionInterface {
virtual ~ArrayInstructionInterface() { };
};
-
-class HLoadKeyed
+class HLoadKeyedFastElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- HLoadKeyed(HValue* obj,
- HValue* key,
- HValue* dependency,
- ElementsKind elements_kind)
+ HLoadKeyedFastElement(HValue* obj,
+ HValue* key,
+ HValue* dependency,
+ ElementsKind elements_kind = FAST_ELEMENTS)
: bit_field_(0) {
+ ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
bit_field_ = ElementsKindField::encode(elements_kind);
-
+ if (IsFastSmiElementsKind(elements_kind) &&
+ IsFastPackedElementsKind(elements_kind)) {
+ set_type(HType::Smi());
+ }
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, dependency);
-
- if (!is_external()) {
- // I can detect the case between storing double (holey and fast) and
- // smi/object by looking at elements_kind_.
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
- IsFastDoubleElementsKind(elements_kind));
-
- if (IsFastSmiOrObjectElementsKind(elements_kind)) {
- if (IsFastSmiElementsKind(elements_kind) &&
- IsFastPackedElementsKind(elements_kind)) {
- set_type(HType::Smi());
- }
-
- set_representation(Representation::Tagged());
- SetGVNFlag(kDependsOnArrayElements);
- } else {
- set_representation(Representation::Double());
- SetGVNFlag(kDependsOnDoubleArrayElements);
- }
- } else {
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- set_representation(Representation::Double());
- } else {
- set_representation(Representation::Integer32());
- }
-
- SetGVNFlag(kDependsOnSpecializedArrayElements);
- // Native code could change the specialized array.
- SetGVNFlag(kDependsOnCalls);
- }
-
+ set_representation(Representation::Tagged());
+ SetGVNFlag(kDependsOnArrayElements);
SetFlag(kUseGVN);
}
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
- }
- HValue* elements() { return OperandAt(0); }
+ HValue* object() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* dependency() { return OperandAt(2); }
uint32_t index_offset() { return IndexOffsetField::decode(bit_field_); }
@@ -4425,68 +4283,164 @@ class HLoadKeyed
}
virtual Representation RequiredInputRepresentation(int index) {
- // kind_fast: tagged[int32] (none)
- // kind_double: tagged[int32] (none)
- // kind_external: external[int32] (none)
- if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
- }
+ // The key is supposed to be Integer32.
+ if (index == 0) return Representation::Tagged();
if (index == 1) return Representation::Integer32();
return Representation::None();
}
- virtual Representation observed_input_representation(int index) {
- return RequiredInputRepresentation(index);
- }
-
virtual void PrintDataTo(StringStream* stream);
bool RequiresHoleCheck() const;
- virtual Range* InferRange(Zone* zone);
-
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement)
protected:
virtual bool DataEquals(HValue* other) {
- if (!other->IsLoadKeyed()) return false;
- HLoadKeyed* other_load = HLoadKeyed::cast(other);
-
+ if (!other->IsLoadKeyedFastElement()) return false;
+ HLoadKeyedFastElement* other_load = HLoadKeyedFastElement::cast(other);
if (IsDehoisted() && index_offset() != other_load->index_offset())
return false;
return elements_kind() == other_load->elements_kind();
}
private:
- virtual bool IsDeletable() const {
- return !RequiresHoleCheck();
+ virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+
+ class ElementsKindField: public BitField<ElementsKind, 0, 4> {};
+ class IndexOffsetField: public BitField<uint32_t, 4, 27> {};
+ class IsDehoistedField: public BitField<bool, 31, 1> {};
+ uint32_t bit_field_;
+};
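
The three BitField specializations above pack the elements kind (4 bits), a 27-bit dehoisted index offset, and a one-bit dehoisted flag into the single uint32_t bit_field_. A simplified re-implementation of the encode/decode scheme, using only standard C++; the real V8 template also static-asserts that values fit, which is omitted here.

#include <cstdint>

// Simplified stand-in for V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
struct BitFieldSketch {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    uint32_t mask = ((1u << size) - 1u) << shift;
    return static_cast<T>((field & mask) >> shift);
  }
};

// Usage mirroring the layout above: kind in bits 0..3, offset in bits
// 4..30, the dehoisted flag in bit 31.
uint32_t Pack(int kind, uint32_t offset, bool dehoisted) {
  return BitFieldSketch<int, 0, 4>::encode(kind)
       | BitFieldSketch<uint32_t, 4, 27>::encode(offset)
       | BitFieldSketch<bool, 31, 1>::encode(dehoisted);
}
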
+
+
+enum HoleCheckMode { PERFORM_HOLE_CHECK, OMIT_HOLE_CHECK };
+
+
+class HLoadKeyedFastDoubleElement
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+ public:
+ HLoadKeyedFastDoubleElement(
+ HValue* elements,
+ HValue* key,
+ HValue* dependency,
+ HoleCheckMode hole_check_mode = PERFORM_HOLE_CHECK)
+ : index_offset_(0),
+ is_dehoisted_(false),
+ hole_check_mode_(hole_check_mode) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
+ set_representation(Representation::Double());
+ SetGVNFlag(kDependsOnDoubleArrayElements);
+ SetFlag(kUseGVN);
}
- // Establish some checks around our packed fields
- enum LoadKeyedBits {
- kBitsForElementsKind = 5,
- kBitsForIndexOffset = 26,
- kBitsForIsDehoisted = 1,
+ HValue* elements() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+ void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
- kStartElementsKind = 0,
- kStartIndexOffset = kStartElementsKind + kBitsForElementsKind,
- kStartIsDehoisted = kStartIndexOffset + kBitsForIndexOffset
- };
+ virtual Representation RequiredInputRepresentation(int index) {
+ // The key is supposed to be Integer32.
+ if (index == 0) return Representation::Tagged();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
+ }
- STATIC_ASSERT((kBitsForElementsKind + kBitsForIndexOffset +
- kBitsForIsDehoisted) <= sizeof(uint32_t)*8);
- STATIC_ASSERT(kElementsKindCount <= (1 << kBitsForElementsKind));
- class ElementsKindField:
- public BitField<ElementsKind, kStartElementsKind, kBitsForElementsKind>
- {}; // NOLINT
- class IndexOffsetField:
- public BitField<uint32_t, kStartIndexOffset, kBitsForIndexOffset>
- {}; // NOLINT
- class IsDehoistedField:
- public BitField<bool, kStartIsDehoisted, kBitsForIsDehoisted>
- {}; // NOLINT
- uint32_t bit_field_;
+ bool RequiresHoleCheck() const {
+ return hole_check_mode_ == PERFORM_HOLE_CHECK;
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ if (!other->IsLoadKeyedFastDoubleElement()) return false;
+ HLoadKeyedFastDoubleElement* other_load =
+ HLoadKeyedFastDoubleElement::cast(other);
+ return hole_check_mode_ == other_load->hole_check_mode_;
+ }
+
+ private:
+ virtual bool IsDeletable() const { return !RequiresHoleCheck(); }
+
+ uint32_t index_offset_;
+ bool is_dehoisted_;
+ HoleCheckMode hole_check_mode_;
+};
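
PERFORM_HOLE_CHECK exists because "holey" arrays (e.g. the result of evaluating [1.5, , 3.5] in JavaScript) have missing elements, and V8 marks a missing slot in an unboxed double array with a reserved NaN bit pattern that a load must test for. A hedged sketch of such a test; the bit pattern below is deliberately made up, since the exact hole NaN V8 reserves is not shown in this patch.

#include <cstdint>
#include <cstring>

// Illustrative hole marker only; V8's real constant differs.
static const uint64_t kIllustrativeHoleBits = 0x7FF7ABCDABCDABCDULL;

bool IsHole(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof bits);  // Inspect the raw NaN payload.
  return bits == kIllustrativeHoleBits;       // Plain == would fail: NaN.
}
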
+
+
+class HLoadKeyedSpecializedArrayElement
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+ public:
+ HLoadKeyedSpecializedArrayElement(HValue* external_elements,
+ HValue* key,
+ HValue* dependency,
+ ElementsKind elements_kind)
+ : elements_kind_(elements_kind),
+ index_offset_(0),
+ is_dehoisted_(false) {
+ SetOperandAt(0, external_elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, dependency);
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ set_representation(Representation::Double());
+ } else {
+ set_representation(Representation::Integer32());
+ }
+ SetGVNFlag(kDependsOnSpecializedArrayElements);
+ // Native code could change the specialized array.
+ SetGVNFlag(kDependsOnCalls);
+ SetFlag(kUseGVN);
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ // The key is supposed to be Integer32.
+ if (index == 0) return Representation::External();
+ if (index == 1) return Representation::Integer32();
+ return Representation::None();
+ }
+
+ HValue* external_pointer() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* dependency() { return OperandAt(2); }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+ void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+
+ virtual Range* InferRange(Zone* zone);
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement)
+
+ protected:
+ virtual bool DataEquals(HValue* other) {
+ if (!other->IsLoadKeyedSpecializedArrayElement()) return false;
+ HLoadKeyedSpecializedArrayElement* cast_other =
+ HLoadKeyedSpecializedArrayElement::cast(other);
+ return elements_kind_ == cast_other->elements_kind();
+ }
+
+ private:
+ virtual bool IsDeletable() const { return true; }
+
+ ElementsKind elements_kind_;
+ uint32_t index_offset_;
+ bool is_dehoisted_;
};
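
The constructor above derives the load's representation purely from the elements kind: the two floating-point external kinds produce Double values, while every integer-typed external array produces Integer32. A standalone sketch of that mapping; the enum values are illustrative, not V8's ElementsKind.

enum SketchKind { kExternalByte, kExternalShort, kExternalInt,
                  kExternalFloat, kExternalDouble };
enum SketchRep { kInteger32, kDouble };

SketchRep RepresentationFor(SketchKind kind) {
  // Mirrors the constructor logic: only the floating-point kinds load as
  // doubles; all the integral kinds fit in an int32 register.
  return (kind == kExternalFloat || kind == kExternalDouble)
             ? kDouble : kInteger32;
}
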
@@ -4507,7 +4461,6 @@ class HLoadKeyedGeneric: public HTemplateInstruction<3> {
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
- // tagged[tagged]
return Representation::Tagged();
}
@@ -4613,75 +4566,84 @@ class HStoreNamedGeneric: public HTemplateInstruction<3> {
};
-class HStoreKeyed
+class HStoreKeyedFastElement
: public HTemplateInstruction<3>, public ArrayInstructionInterface {
public:
- HStoreKeyed(HValue* obj, HValue* key, HValue* val,
- ElementsKind elements_kind)
+ HStoreKeyedFastElement(HValue* obj, HValue* key, HValue* val,
+ ElementsKind elements_kind = FAST_ELEMENTS)
: elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
SetOperandAt(0, obj);
SetOperandAt(1, key);
SetOperandAt(2, val);
+ SetGVNFlag(kChangesArrayElements);
+ }
- if (is_external()) {
- SetGVNFlag(kChangesSpecializedArrayElements);
- } else if (IsFastDoubleElementsKind(elements_kind)) {
- SetGVNFlag(kChangesDoubleArrayElements);
- SetFlag(kDeoptimizeOnUndefined);
- } else {
- SetGVNFlag(kChangesArrayElements);
- }
+ virtual Representation RequiredInputRepresentation(int index) {
+ // The key is supposed to be Integer32.
+ return index == 1
+ ? Representation::Integer32()
+ : Representation::Tagged();
+ }
- // EXTERNAL_{UNSIGNED_,}{BYTE,SHORT,INT}_ELEMENTS are truncating.
- if (elements_kind >= EXTERNAL_BYTE_ELEMENTS &&
- elements_kind <= EXTERNAL_UNSIGNED_INT_ELEMENTS) {
- SetFlag(kTruncatingToInt32);
- }
+ HValue* object() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+ bool value_is_smi() {
+ return IsFastSmiElementsKind(elements_kind_);
}
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+ void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
- virtual Representation RequiredInputRepresentation(int index) {
- // kind_fast: tagged[int32] = tagged
- // kind_double: tagged[int32] = double
- // kind_external: external[int32] = (double | int32)
- if (index == 0) {
- return is_external() ? Representation::External()
- : Representation::Tagged();
- } else if (index == 1) {
- return Representation::Integer32();
+ bool NeedsWriteBarrier() {
+ if (value_is_smi()) {
+ return false;
+ } else {
+ return StoringValueNeedsWriteBarrier(value());
}
+ }
- ASSERT_EQ(index, 2);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
+ virtual void PrintDataTo(StringStream* stream);
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement)
+
+ private:
+ ElementsKind elements_kind_;
+ uint32_t index_offset_;
+ bool is_dehoisted_;
+};
- return is_external() ? Representation::Integer32()
- : Representation::Tagged();
- }
- bool is_external() const {
- return IsExternalArrayElementsKind(elements_kind());
+class HStoreKeyedFastDoubleElement
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+ public:
+ HStoreKeyedFastDoubleElement(HValue* elements,
+ HValue* key,
+ HValue* val)
+ : index_offset_(0), is_dehoisted_(false) {
+ SetOperandAt(0, elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, val);
+ SetFlag(kDeoptimizeOnUndefined);
+ SetGVNFlag(kChangesDoubleArrayElements);
}
- virtual Representation observed_input_representation(int index) {
- if (index < 2) return RequiredInputRepresentation(index);
- if (IsDoubleOrFloatElementsKind(elements_kind())) {
- return Representation::Double();
- }
- if (is_external()) {
+ virtual Representation RequiredInputRepresentation(int index) {
+ if (index == 1) {
return Representation::Integer32();
+ } else if (index == 2) {
+ return Representation::Double();
+ } else {
+ return Representation::Tagged();
}
- // For fast object elements kinds, don't assume anything.
- return Representation::None();
}
HValue* elements() { return OperandAt(0); }
HValue* key() { return OperandAt(1); }
HValue* value() { return OperandAt(2); }
- bool value_is_smi() const {
- return IsFastSmiElementsKind(elements_kind_);
- }
- ElementsKind elements_kind() const { return elements_kind_; }
uint32_t index_offset() { return index_offset_; }
void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
HValue* GetKey() { return key(); }
@@ -4690,18 +4652,64 @@ class HStoreKeyed
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool NeedsWriteBarrier() {
- if (value_is_smi()) {
- return false;
- } else {
- return StoringValueNeedsWriteBarrier(value());
- }
+ return StoringValueNeedsWriteBarrier(value());
}
bool NeedsCanonicalization();
virtual void PrintDataTo(StringStream* stream);
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement)
+
+ private:
+ uint32_t index_offset_;
+ bool is_dehoisted_;
+};
+
+
+class HStoreKeyedSpecializedArrayElement
+ : public HTemplateInstruction<3>, public ArrayInstructionInterface {
+ public:
+ HStoreKeyedSpecializedArrayElement(HValue* external_elements,
+ HValue* key,
+ HValue* val,
+ ElementsKind elements_kind)
+ : elements_kind_(elements_kind), index_offset_(0), is_dehoisted_(false) {
+ SetGVNFlag(kChangesSpecializedArrayElements);
+ SetOperandAt(0, external_elements);
+ SetOperandAt(1, key);
+ SetOperandAt(2, val);
+ }
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ virtual Representation RequiredInputRepresentation(int index) {
+ if (index == 0) {
+ return Representation::External();
+ } else {
+ bool float_or_double_elements =
+ elements_kind() == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind() == EXTERNAL_DOUBLE_ELEMENTS;
+ if (index == 2 && float_or_double_elements) {
+ return Representation::Double();
+ } else {
+ return Representation::Integer32();
+ }
+ }
+ }
+
+ HValue* external_pointer() { return OperandAt(0); }
+ HValue* key() { return OperandAt(1); }
+ HValue* value() { return OperandAt(2); }
+ ElementsKind elements_kind() const { return elements_kind_; }
+ uint32_t index_offset() { return index_offset_; }
+ void SetIndexOffset(uint32_t index_offset) { index_offset_ = index_offset; }
+ HValue* GetKey() { return key(); }
+ void SetKey(HValue* key) { SetOperandAt(1, key); }
+ bool IsDehoisted() { return is_dehoisted_; }
+ void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement)
private:
ElementsKind elements_kind_;
@@ -4732,7 +4740,6 @@ class HStoreKeyedGeneric: public HTemplateInstruction<4> {
StrictModeFlag strict_mode_flag() { return strict_mode_flag_; }
virtual Representation RequiredInputRepresentation(int index) {
- // tagged[tagged] = tagged
return Representation::Tagged();
}
@@ -4798,7 +4805,6 @@ class HStringAdd: public HBinaryOperation {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
- SetGVNFlag(kChangesNewSpacePromotion);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -5156,6 +5162,7 @@ class HTypeof: public HTemplateInstruction<2> {
HValue* context() { return OperandAt(0); }
HValue* value() { return OperandAt(1); }
+ virtual HValue* Canonicalize();
virtual void PrintDataTo(StringStream* stream);
virtual Representation RequiredInputRepresentation(int index) {
@@ -5227,33 +5234,6 @@ class HDateField: public HUnaryOperation {
};
-class HSeqStringSetChar: public HTemplateInstruction<3> {
- public:
- HSeqStringSetChar(String::Encoding encoding,
- HValue* string,
- HValue* index,
- HValue* value) : encoding_(encoding) {
- SetOperandAt(0, string);
- SetOperandAt(1, index);
- SetOperandAt(2, value);
- }
-
- String::Encoding encoding() { return encoding_; }
- HValue* string() { return OperandAt(0); }
- HValue* index() { return OperandAt(1); }
- HValue* value() { return OperandAt(2); }
-
- virtual Representation RequiredInputRepresentation(int index) {
- return Representation::Tagged();
- }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class HDeleteProperty: public HBinaryOperation {
public:
HDeleteProperty(HValue* context, HValue* obj, HValue* key)
diff --git a/deps/v8/src/hydrogen.cc b/deps/v8/src/hydrogen.cc
index aad9da3e7..374e54c97 100644
--- a/deps/v8/src/hydrogen.cc
+++ b/deps/v8/src/hydrogen.cc
@@ -133,8 +133,7 @@ HDeoptimize* HBasicBlock::CreateDeoptimize(
}
-HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
- RemovableSimulate removable) {
+HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id) {
ASSERT(HasEnvironment());
HEnvironment* environment = last_environment();
ASSERT(ast_id.IsNone() ||
@@ -143,12 +142,8 @@ HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
int push_count = environment->push_count();
int pop_count = environment->pop_count();
- HSimulate* instr =
- new(zone()) HSimulate(ast_id, pop_count, zone(), removable);
- // Order of pushed values: newest (top of stack) first. This allows
- // HSimulate::MergeInto() to easily append additional pushed values
- // that are older (from further down the stack).
- for (int i = 0; i < push_count; ++i) {
+ HSimulate* instr = new(zone()) HSimulate(ast_id, pop_count, zone());
+ for (int i = push_count - 1; i >= 0; --i) {
instr->AddPushedValue(environment->ExpressionStackAt(i));
}
for (int i = 0; i < environment->assigned_variables()->length(); ++i) {
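
The two loops record the same expression-stack slots in opposite orders: the removed variant pushed the newest value (slot 0, the stack top) first, which is what HSimulate::MergeInto relied on to append older values, while the restored variant records oldest-first. A tiny sketch of the two index orders, assuming slot 0 is the top of the stack:

#include <cstdio>

int main() {
  const int push_count = 3;
  for (int i = 0; i < push_count; ++i)          // Removed order: 0, 1, 2
    std::printf("newest-first slot %d\n", i);   // (top of stack first).
  for (int i = push_count - 1; i >= 0; --i)     // Restored order: 2, 1, 0
    std::printf("oldest-first slot %d\n", i);   // (bottom of stack first).
  return 0;
}
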
@@ -1296,7 +1291,7 @@ void HRangeAnalysis::Analyze(HBasicBlock* block) {
void HRangeAnalysis::InferControlFlowRange(HCompareIDAndBranch* test,
HBasicBlock* dest) {
ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
- if (test->representation().IsInteger32()) {
+ if (test->GetInputRepresentation().IsInteger32()) {
Token::Value op = test->token();
if (test->SecondSuccessor() == dest) {
op = Token::NegateCompareOp(op);
@@ -2244,8 +2239,32 @@ void HGlobalValueNumberer::AnalyzeGraph() {
}
+class HInferRepresentation BASE_EMBEDDED {
+ public:
+ explicit HInferRepresentation(HGraph* graph)
+ : graph_(graph),
+ worklist_(8, graph->zone()),
+ in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
+
+ void Analyze();
+
+ private:
+ Representation TryChange(HValue* current);
+ void AddToWorklist(HValue* current);
+ void InferBasedOnInputs(HValue* current);
+ void AddDependantsToWorklist(HValue* current);
+ void InferBasedOnUses(HValue* current);
+
+ Zone* zone() const { return graph_->zone(); }
+
+ HGraph* graph_;
+ ZoneList<HValue*> worklist_;
+ BitVector in_worklist_;
+};
+
+
void HInferRepresentation::AddToWorklist(HValue* current) {
- if (current->representation().IsTagged()) return;
+ if (current->representation().IsSpecialization()) return;
if (!current->CheckFlag(HValue::kFlexibleRepresentation)) return;
if (in_worklist_.Contains(current->id())) return;
worklist_.Add(current, zone());
@@ -2253,6 +2272,105 @@ void HInferRepresentation::AddToWorklist(HValue* current) {
}
+// This method tries to specialize the representation type of the value
+// given as a parameter. The value is asked to infer its representation type
+// based on its inputs. If the inferred type is more specialized, then this
+// becomes the new representation type of the node.
+void HInferRepresentation::InferBasedOnInputs(HValue* current) {
+ Representation r = current->representation();
+ if (r.IsSpecialization()) return;
+ ASSERT(current->CheckFlag(HValue::kFlexibleRepresentation));
+ Representation inferred = current->InferredRepresentation();
+ if (inferred.IsSpecialization()) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on inputs\n",
+ current->id(),
+ r.Mnemonic(),
+ inferred.Mnemonic());
+ }
+ current->ChangeRepresentation(inferred);
+ AddDependantsToWorklist(current);
+ }
+}
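
A worked instance of the comment above: a phi whose inputs are all already Integer32 reports Integer32 from InferredRepresentation(); since that is more specialized than the phi's current None, the phi is retyped and its uses and operands are re-queued. The guard is one-directional, as sketched below (the Rep values are made up; the real Representation class distinguishes more kinds).

enum Rep { kNone, kInteger32, kDouble, kTagged };

bool IsSpecialization(Rep r) { return r == kInteger32 || r == kDouble; }

// Mirrors InferBasedOnInputs: adopt the inferred representation only when
// the node is not yet specialized and the inference is a specialization,
// i.e. the pass only ever moves toward a more specific type.
Rep Specialize(Rep current, Rep inferred) {
  if (IsSpecialization(current)) return current;
  if (IsSpecialization(inferred)) return inferred;
  return current;
}
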
+
+
+void HInferRepresentation::AddDependantsToWorklist(HValue* value) {
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ AddToWorklist(it.value());
+ }
+ for (int i = 0; i < value->OperandCount(); ++i) {
+ AddToWorklist(value->OperandAt(i));
+ }
+}
+
+
+// This method calculates whether specializing the representation of the value
+// given as the parameter has a benefit in terms of less necessary type
+// conversions. If there is a benefit, then the representation of the value is
+// specialized.
+void HInferRepresentation::InferBasedOnUses(HValue* value) {
+ Representation r = value->representation();
+ if (r.IsSpecialization() || value->HasNoUses()) return;
+ ASSERT(value->CheckFlag(HValue::kFlexibleRepresentation));
+ Representation new_rep = TryChange(value);
+ if (!new_rep.IsNone()) {
+ if (!value->representation().Equals(new_rep)) {
+ if (FLAG_trace_representation) {
+ PrintF("Changing #%d representation %s -> %s based on uses\n",
+ value->id(),
+ r.Mnemonic(),
+ new_rep.Mnemonic());
+ }
+ value->ChangeRepresentation(new_rep);
+ AddDependantsToWorklist(value);
+ }
+ }
+}
+
+
+Representation HInferRepresentation::TryChange(HValue* value) {
+ // Array of use counts for each representation.
+ int use_count[Representation::kNumRepresentations] = { 0 };
+
+ for (HUseIterator it(value->uses()); !it.Done(); it.Advance()) {
+ HValue* use = it.value();
+ Representation rep = use->ObservedInputRepresentation(it.index());
+ if (rep.IsNone()) continue;
+ if (FLAG_trace_representation) {
+ PrintF("%d %s is used by %d %s as %s\n",
+ value->id(),
+ value->Mnemonic(),
+ use->id(),
+ use->Mnemonic(),
+ rep.Mnemonic());
+ }
+ if (use->IsPhi()) HPhi::cast(use)->AddIndirectUsesTo(&use_count[0]);
+ use_count[rep.kind()] += use->LoopWeight();
+ }
+ int tagged_count = use_count[Representation::kTagged];
+ int double_count = use_count[Representation::kDouble];
+ int int32_count = use_count[Representation::kInteger32];
+ int non_tagged_count = double_count + int32_count;
+
+ // If a non-loop phi has tagged uses, don't convert it to untagged.
+ if (value->IsPhi() && !value->block()->IsLoopHeader() && tagged_count > 0) {
+ return Representation::None();
+ }
+
+ // Prefer unboxing over boxing, the latter is more expensive.
+ if (tagged_count > non_tagged_count) return Representation::None();
+
+ // Prefer Integer32 over Double, if possible.
+ if (int32_count > 0 && value->IsConvertibleToInteger()) {
+ return Representation::Integer32();
+ }
+
+ if (double_count > 0) return Representation::Double();
+
+ return Representation::None();
+}
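
TryChange is a straight vote: each use contributes its observed input representation, weighted by loop depth, and untagging is refused whenever tagged uses dominate or a non-loop phi has any tagged use. For example, with one tagged use and three int32 uses on a value convertible to integer, non_tagged_count (3) exceeds tagged_count (1), so the value becomes Integer32. A compact sketch of the decision, with simplified names:

enum VoteRep { kNoneRep, kInteger32Rep, kDoubleRep };

VoteRep TryChangeSketch(int tagged, int int32, int dbl,
                        bool convertible_to_int32,
                        bool non_loop_phi_with_tagged_use) {
  if (non_loop_phi_with_tagged_use) return kNoneRep;  // Never untag those.
  if (tagged > int32 + dbl) return kNoneRep;     // Boxing would dominate.
  if (int32 > 0 && convertible_to_int32) return kInteger32Rep;
  if (dbl > 0) return kDoubleRep;                // Fall back to Double.
  return kNoneRep;
}
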
+
+
void HInferRepresentation::Analyze() {
HPhase phase("H_Infer representations", graph_);
@@ -2303,6 +2421,7 @@ void HInferRepresentation::Analyze() {
it.Advance()) {
HPhi* phi = phi_list->at(it.Current());
phi->set_is_convertible_to_integer(false);
+ phi->ResetInteger32Uses();
}
}
@@ -2338,74 +2457,8 @@ void HInferRepresentation::Analyze() {
while (!worklist_.is_empty()) {
HValue* current = worklist_.RemoveLast();
in_worklist_.Remove(current->id());
- current->InferRepresentation(this);
- }
-
- // Lastly: any instruction that we don't have representation information
- // for defaults to Tagged.
- for (int i = 0; i < graph_->blocks()->length(); ++i) {
- HBasicBlock* block = graph_->blocks()->at(i);
- const ZoneList<HPhi*>* phis = block->phis();
- for (int j = 0; j < phis->length(); ++j) {
- HPhi* phi = phis->at(j);
- if (phi->representation().IsNone()) {
- phi->ChangeRepresentation(Representation::Tagged());
- }
- }
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
- if (current->representation().IsNone() &&
- current->CheckFlag(HInstruction::kFlexibleRepresentation)) {
- current->ChangeRepresentation(Representation::Tagged());
- }
- }
- }
-}
-
-
-void HGraph::MergeRemovableSimulates() {
- for (int i = 0; i < blocks()->length(); ++i) {
- HBasicBlock* block = blocks()->at(i);
- // Always reset the folding candidate at the start of a block.
- HSimulate* folding_candidate = NULL;
- // Nasty heuristic: Never remove the first simulate in a block. This
- // just so happens to have a beneficial effect on register allocation.
- bool first = true;
- for (HInstruction* current = block->first();
- current != NULL; current = current->next()) {
- if (current->IsLeaveInlined()) {
- // Never fold simulates from inlined environments into simulates
- // in the outer environment.
- // (Before each HEnterInlined, there is a non-foldable HSimulate
- // anyway, so we get the barrier in the other direction for free.)
- if (folding_candidate != NULL) {
- folding_candidate->DeleteAndReplaceWith(NULL);
- }
- folding_candidate = NULL;
- continue;
- }
- // If we have an HSimulate and a candidate, perform the folding.
- if (!current->IsSimulate()) continue;
- if (first) {
- first = false;
- continue;
- }
- HSimulate* current_simulate = HSimulate::cast(current);
- if (folding_candidate != NULL) {
- folding_candidate->MergeInto(current_simulate);
- folding_candidate->DeleteAndReplaceWith(NULL);
- folding_candidate = NULL;
- }
- // Check if the current simulate is a candidate for folding.
- if (current_simulate->previous()->HasObservableSideEffects() &&
- !current_simulate->next()->IsSimulate()) {
- continue;
- }
- if (!current_simulate->is_candidate_for_removal()) {
- continue;
- }
- folding_candidate = current_simulate;
- }
+ InferBasedOnInputs(current);
+ InferBasedOnUses(current);
}
}
@@ -2500,6 +2553,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
} else {
next = HInstruction::cast(use_value);
}
+
// For constants we try to make the representation change at compile
// time. When a representation change is not possible without loss of
// information we treat constants like normal instructions and insert the
@@ -2511,7 +2565,7 @@ void HGraph::InsertRepresentationChangeForUse(HValue* value,
if (value->IsConstant()) {
HConstant* constant = HConstant::cast(value);
// Try to create a new copy of the constant with the new representation.
- new_value = (is_truncating && to.IsInteger32())
+ new_value = is_truncating
? constant->CopyToTruncatedInt32(zone())
: constant->CopyToRepresentation(to, zone());
}
@@ -2571,23 +2625,9 @@ void HGraph::InsertRepresentationChanges() {
for (int i = 0; i < phi_list()->length(); i++) {
HPhi* phi = phi_list()->at(i);
if (!phi->CheckFlag(HValue::kTruncatingToInt32)) continue;
- for (HUseIterator it(phi->uses()); !it.Done(); it.Advance()) {
- // If a Phi is used as a non-truncating int32 or as a double,
- // clear its "truncating" flag.
- HValue* use = it.value();
- Representation input_representation =
- use->RequiredInputRepresentation(it.index());
- if ((input_representation.IsInteger32() &&
- !use->CheckFlag(HValue::kTruncatingToInt32)) ||
- input_representation.IsDouble()) {
- if (FLAG_trace_representation) {
- PrintF("#%d Phi is not truncating because of #%d %s\n",
- phi->id(), it.value()->id(), it.value()->Mnemonic());
- }
- phi->ClearFlag(HValue::kTruncatingToInt32);
- change = true;
- break;
- }
+ if (!phi->CheckUsesForFlag(HValue::kTruncatingToInt32)) {
+ phi->ClearFlag(HValue::kTruncatingToInt32);
+ change = true;
}
}
}
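
The removed loop and its CheckUsesForFlag replacement compute the same predicate: a phi may stay truncating only while every use truncates, and since clearing one phi can invalidate another, the caller iterates to a fixed point via the outer change flag. A sketch of the per-phi test:

#include <vector>

struct UseSketch { bool is_int32; bool is_double; bool truncating; };

// True when every use tolerates int32 truncation, mirroring the check
// that decides whether kTruncatingToInt32 may be kept.
bool AllUsesTruncate(const std::vector<UseSketch>& uses) {
  for (size_t i = 0; i < uses.size(); ++i) {
    if (uses[i].is_double) return false;
    if (uses[i].is_int32 && !uses[i].truncating) return false;
  }
  return true;
}
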
@@ -2602,9 +2642,8 @@ void HGraph::InsertRepresentationChanges() {
// Process normal instructions.
HInstruction* current = blocks_[i]->first();
while (current != NULL) {
- HInstruction* next = current->next();
InsertRepresentationChangesForValue(current);
- current = next;
+ current = current->next();
}
}
}
@@ -2676,18 +2715,17 @@ bool Uint32Analysis::IsSafeUint32Use(HValue* val, HValue* use) {
} else if (use->IsChange() || use->IsSimulate()) {
    // Conversions and deoptimization have special support for uint32.
return true;
- } else if (use->IsStoreKeyed()) {
- HStoreKeyed* store = HStoreKeyed::cast(use);
- if (store->is_external()) {
- // Storing a value into an external integer array is a bit level
- // operation.
- if (store->value() == val) {
-      // Clamping or a conversion to double should have been inserted.
- ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
- ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
- return true;
- }
+ } else if (use->IsStoreKeyedSpecializedArrayElement()) {
+ // Storing a value into an external integer array is a bit level operation.
+ HStoreKeyedSpecializedArrayElement* store =
+ HStoreKeyedSpecializedArrayElement::cast(use);
+
+ if (store->value() == val) {
+    // Clamping or a conversion to double should have been inserted.
+ ASSERT(store->elements_kind() != EXTERNAL_PIXEL_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_FLOAT_ELEMENTS);
+ ASSERT(store->elements_kind() != EXTERNAL_DOUBLE_ELEMENTS);
+ return true;
}
}
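
The "bit level operation" claim above is what makes a uint32 value safe here: a store into an external integer array keeps only the low bits of the value, so it cannot observe the difference between a uint32 and a negative int32 with the same bit pattern. A sketch with a plain C++ array standing in for the external backing store (two's-complement wrap assumed):

#include <cassert>
#include <cstdint>

int main() {
  int8_t backing_store[1] = {0};
  uint32_t value = 0xFFFFFFFFu;  // Out of int32 range when read as uint32.
  // The store is bit-level: only the low 8 bits survive, as for an
  // EXTERNAL_BYTE_ELEMENTS array.
  backing_store[0] = static_cast<int8_t>(value);
  assert(backing_store[0] == -1);
  return 0;
}
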
@@ -2999,9 +3037,7 @@ void TestContext::ReturnValue(HValue* value) {
void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
ASSERT(!instr->IsControlInstruction());
owner()->AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
@@ -3025,9 +3061,7 @@ void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
}
owner()->AddInstruction(instr);
owner()->Push(instr);
- if (instr->HasObservableSideEffects()) {
- owner()->AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) owner()->AddSimulate(ast_id);
}
@@ -3059,7 +3093,7 @@ void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
// this one isn't actually needed (and wouldn't work if it were targeted).
if (instr->HasObservableSideEffects()) {
builder->Push(instr);
- builder->AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ builder->AddSimulate(ast_id);
builder->Pop();
}
BuildBranch(instr);
@@ -3237,8 +3271,9 @@ HGraph* HGraphBuilder::CreateGraph() {
// optimization. Disable optimistic LICM in that case.
Handle<Code> unoptimized_code(info()->shared_info()->code());
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
int checksum = type_info->own_type_change_checksum();
int composite_checksum = graph()->update_type_change_checksum(checksum);
graph()->set_use_optimistic_licm(
@@ -3285,11 +3320,6 @@ bool HGraph::Optimize(SmartArrayPointer<char>* bailout_reason) {
HInferRepresentation rep(this);
rep.Analyze();
- // Remove HSimulate instructions that have turned out not to be needed
- // after all by folding them into the following HSimulate.
- // This must happen after inferring representations.
- MergeRemovableSimulates();
-
MarkDeoptimizeOnUndefined();
InsertRepresentationChanges();
@@ -3446,10 +3476,7 @@ class BoundsCheckBbData: public ZoneObject {
// (either upper or lower; note that HasSingleCheck() becomes false).
// Otherwise one of the current checks is modified so that it also covers
// new_offset, and new_check is removed.
- //
- // If the check cannot be modified because the context is unknown it
- // returns false, otherwise it returns true.
- bool CoverCheck(HBoundsCheck* new_check,
+ void CoverCheck(HBoundsCheck* new_check,
int32_t new_offset) {
ASSERT(new_check->index()->representation().IsInteger32());
bool keep_new_check = false;
@@ -3460,13 +3487,12 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
upper_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(upper_check_,
- &added_upper_index_,
- &added_upper_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
+ BuildOffsetAdd(upper_check_,
+ &added_upper_index_,
+ &added_upper_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
upper_check_->SetOperandAt(0, added_upper_index_);
}
} else if (new_offset < lower_offset_) {
@@ -3475,13 +3501,12 @@ class BoundsCheckBbData: public ZoneObject {
keep_new_check = true;
lower_check_ = new_check;
} else {
- bool result = BuildOffsetAdd(lower_check_,
- &added_lower_index_,
- &added_lower_offset_,
- Key()->IndexBase(),
- new_check->index()->representation(),
- new_offset);
- if (!result) return false;
+ BuildOffsetAdd(lower_check_,
+ &added_lower_index_,
+ &added_lower_offset_,
+ Key()->IndexBase(),
+ new_check->index()->representation(),
+ new_offset);
lower_check_->SetOperandAt(0, added_lower_index_);
}
} else {
@@ -3491,8 +3516,6 @@ class BoundsCheckBbData: public ZoneObject {
if (!keep_new_check) {
new_check->DeleteAndReplaceWith(NULL);
}
-
- return true;
}
void RemoveZeroOperations() {
@@ -3535,34 +3558,20 @@ class BoundsCheckBbData: public ZoneObject {
BoundsCheckBbData* next_in_bb_;
BoundsCheckBbData* father_in_dt_;
- // Given an existing add instruction and a bounds check it tries to
- // find the current context (either of the add or of the check index).
- HValue* IndexContext(HAdd* add, HBoundsCheck* check) {
- if (add != NULL) {
- return add->context();
- }
- if (check->index()->IsBinaryOperation()) {
- return HBinaryOperation::cast(check->index())->context();
- }
- return NULL;
- }
-
- // This function returns false if it cannot build the add because the
- // current context cannot be determined.
- bool BuildOffsetAdd(HBoundsCheck* check,
+ void BuildOffsetAdd(HBoundsCheck* check,
HAdd** add,
HConstant** constant,
HValue* original_value,
Representation representation,
int32_t new_offset) {
- HValue* index_context = IndexContext(*add, check);
- if (index_context == NULL) return false;
-
HConstant* new_constant = new(BasicBlock()->zone())
HConstant(new_offset, Representation::Integer32());
if (*add == NULL) {
new_constant->InsertBefore(check);
- *add = new(BasicBlock()->zone()) HAdd(index_context,
+ // Because of the bounds checks elimination algorithm, the index is always
+ // an HAdd or an HSub here, so we can safely cast to an HBinaryOperation.
+ HValue* context = HBinaryOperation::cast(check->index())->context();
+ *add = new(BasicBlock()->zone()) HAdd(context,
original_value,
new_constant);
(*add)->AssumeRepresentation(representation);
@@ -3572,7 +3581,6 @@ class BoundsCheckBbData: public ZoneObject {
(*constant)->DeleteAndReplaceWith(new_constant);
}
*constant = new_constant;
- return true;
}
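
BuildOffsetAdd is how CoverCheck widens one existing bounds check instead of keeping two: for accesses at index i and i + k it materializes an explicit i + k add, rewires the surviving HBoundsCheck to test it, and deletes the newer check. The effect is that each (block, index) pair keeps a single covered offset interval, as sketched below.

#include <algorithm>

// Sketch: one [lower, upper] offset interval per (block, index) pair.
// "Widening" corresponds to BuildOffsetAdd patching the surviving check.
struct CoveredInterval {
  int lower, upper;
  bool Covers(int offset) const {
    return lower <= offset && offset <= upper;
  }
  void Widen(int offset) {
    lower = std::min(lower, offset);
    upper = std::max(upper, offset);
  }
};
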
void RemoveZeroAdd(HAdd** add, HConstant** constant) {
@@ -3647,11 +3655,9 @@ void HGraph::EliminateRedundantBoundsChecks(HBasicBlock* bb,
*data_p = bb_data_list;
} else if (data->OffsetIsCovered(offset)) {
check->DeleteAndReplaceWith(NULL);
- } else if (data->BasicBlock() != bb ||
- !data->CoverCheck(check, offset)) {
- // If the check is in the current BB we try to modify it by calling
- // "CoverCheck", but if also that fails we record the current offsets
- // in a new data instance because from now on they are covered.
+ } else if (data->BasicBlock() == bb) {
+ data->CoverCheck(check, offset);
+ } else {
int32_t new_lower_offset = offset < data->LowerOffset()
? offset
: data->LowerOffset();
@@ -3751,11 +3757,27 @@ void HGraph::DehoistSimpleArrayIndexComputations() {
instr != NULL;
instr = instr->next()) {
ArrayInstructionInterface* array_instruction = NULL;
- if (instr->IsLoadKeyed()) {
- HLoadKeyed* op = HLoadKeyed::cast(instr);
+ if (instr->IsLoadKeyedFastElement()) {
+ HLoadKeyedFastElement* op = HLoadKeyedFastElement::cast(instr);
+ array_instruction = static_cast<ArrayInstructionInterface*>(op);
+ } else if (instr->IsLoadKeyedFastDoubleElement()) {
+ HLoadKeyedFastDoubleElement* op =
+ HLoadKeyedFastDoubleElement::cast(instr);
+ array_instruction = static_cast<ArrayInstructionInterface*>(op);
+ } else if (instr->IsLoadKeyedSpecializedArrayElement()) {
+ HLoadKeyedSpecializedArrayElement* op =
+ HLoadKeyedSpecializedArrayElement::cast(instr);
+ array_instruction = static_cast<ArrayInstructionInterface*>(op);
+ } else if (instr->IsStoreKeyedFastElement()) {
+ HStoreKeyedFastElement* op = HStoreKeyedFastElement::cast(instr);
+ array_instruction = static_cast<ArrayInstructionInterface*>(op);
+ } else if (instr->IsStoreKeyedFastDoubleElement()) {
+ HStoreKeyedFastDoubleElement* op =
+ HStoreKeyedFastDoubleElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
- } else if (instr->IsStoreKeyed()) {
- HStoreKeyed* op = HStoreKeyed::cast(instr);
+ } else if (instr->IsStoreKeyedSpecializedArrayElement()) {
+ HStoreKeyedSpecializedArrayElement* op =
+ HStoreKeyedSpecializedArrayElement::cast(instr);
array_instruction = static_cast<ArrayInstructionInterface*>(op);
} else {
continue;
@@ -3803,9 +3825,9 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
}
-void HGraphBuilder::AddSimulate(BailoutId ast_id, RemovableSimulate removable) {
+void HGraphBuilder::AddSimulate(BailoutId ast_id) {
ASSERT(current_block() != NULL);
- current_block()->AddSimulate(ast_id, removable);
+ current_block()->AddSimulate(ast_id);
}
@@ -4162,7 +4184,7 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
!clause->label()->IsStringLiteral()) ||
(switch_type == SMI_SWITCH &&
!clause->label()->IsSmiLiteral())) {
- return Bailout("SwitchStatement: mixed label types are not supported");
+ return Bailout("SwitchStatemnt: mixed label types are not supported");
}
}
@@ -4216,13 +4238,12 @@ void HGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
new(zone()) HCompareIDAndBranch(tag_value,
label_value,
Token::EQ_STRICT);
- compare_->set_observed_input_representation(
- Representation::Integer32(), Representation::Integer32());
+ compare_->SetInputRepresentation(Representation::Integer32());
compare = compare_;
} else {
compare = new(zone()) HStringCompareAndBranch(context, tag_value,
- label_value,
- Token::EQ_STRICT);
+ label_value,
+ Token::EQ_STRICT);
}
compare->SetSuccessorAt(0, body_block);
@@ -4581,8 +4602,7 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
// Check that we still have more keys.
HCompareIDAndBranch* compare_index =
new(zone()) HCompareIDAndBranch(index, limit, Token::LT);
- compare_index->set_observed_input_representation(
- Representation::Integer32(), Representation::Integer32());
+ compare_index->SetInputRepresentation(Representation::Integer32());
HBasicBlock* loop_body = graph()->CreateBasicBlock();
HBasicBlock* loop_successor = graph()->CreateBasicBlock();
@@ -4597,11 +4617,10 @@ void HGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
set_current_block(loop_body);
HValue* key = AddInstruction(
- new(zone()) HLoadKeyed(
+ new(zone()) HLoadKeyedFastElement(
environment()->ExpressionStackAt(2), // Enum cache.
environment()->ExpressionStackAt(0), // Iteration index.
- environment()->ExpressionStackAt(0),
- FAST_ELEMENTS));
+ environment()->ExpressionStackAt(0)));
// Check if the expected map still matches that of the enumerable.
// If not just deoptimize.
@@ -4857,7 +4876,7 @@ void HGraphBuilder::VisitLiteral(Literal* expr) {
ASSERT(current_block() != NULL);
ASSERT(current_block()->HasPredecessor());
HConstant* instr =
- new(zone()) HConstant(expr->handle(), Representation::None());
+ new(zone()) HConstant(expr->handle(), Representation::Tagged());
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -5094,9 +5113,7 @@ void HGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
map));
}
AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(key->id(), REMOVABLE_SIMULATE);
- }
+ if (store->HasObservableSideEffects()) AddSimulate(key->id());
} else {
CHECK_ALIVE(VisitForEffect(value));
}
@@ -5208,14 +5225,18 @@ void HGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- AddInstruction(new(zone()) HStoreKeyed(
+ AddInstruction(new(zone()) HStoreKeyedFastElement(
elements,
key,
value,
boilerplate_elements_kind));
break;
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ AddInstruction(new(zone()) HStoreKeyedFastDoubleElement(elements,
+ key,
+ value));
+ break;
default:
UNREACHABLE();
break;
@@ -5260,19 +5281,18 @@ static int ComputeLoadStoreFieldIndex(Handle<Map> type,
}
-void HGraphBuilder::AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map) {
- AddInstruction(new(zone()) HCheckNonSmi(object));
- AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
-}
-
-
HInstruction* HGraphBuilder::BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
Handle<Map> map,
- LookupResult* lookup) {
+ LookupResult* lookup,
+ bool smi_and_map_check) {
ASSERT(lookup->IsFound());
+ if (smi_and_map_check) {
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
+ }
+
// If the property does not exist yet, we have to check that it wasn't made
  // readonly or turned into a setter by modifications made on the
  // prototype chain in the meantime.
@@ -5341,7 +5361,7 @@ HInstruction* HGraphBuilder::BuildCallSetter(HValue* object,
Handle<Map> map,
Handle<JSFunction> setter,
Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
+ AddCheckConstantFunction(holder, object, map, true);
AddInstruction(new(zone()) HPushArgument(object));
AddInstruction(new(zone()) HPushArgument(value));
return new(zone()) HCallConstantFunction(setter, 2);
@@ -5355,8 +5375,8 @@ HInstruction* HGraphBuilder::BuildStoreNamedMonomorphic(HValue* object,
// Handle a store to a known field.
LookupResult lookup(isolate());
if (ComputeLoadStoreField(map, name, &lookup, true)) {
- AddCheckMapsWithTransitions(object, map);
- return BuildStoreNamedField(object, name, value, map, &lookup);
+ // true = needs smi and map check.
+ return BuildStoreNamedField(object, name, value, map, &lookup, true);
}
// No luck, do a generic store.
@@ -5404,7 +5424,7 @@ void HGraphBuilder::HandlePolymorphicLoadNamedField(Property* expr,
HInstruction* instr;
if (count == types->length() && is_monomorphic_field) {
AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
- instr = BuildLoadNamedField(object, map, &lookup);
+ instr = BuildLoadNamedField(object, map, &lookup, false);
} else {
HValue* context = environment()->LookupContext();
instr = new(zone()) HLoadNamedFieldPolymorphic(context,
@@ -5447,7 +5467,7 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
set_current_block(if_true);
HInstruction* instr;
CHECK_ALIVE(instr =
- BuildStoreNamedField(object, name, value, map, &lookup));
+ BuildStoreNamedField(object, name, value, map, &lookup, false));
instr->set_position(expr->position());
// Goto will add the HSimulate for the store.
AddInstruction(instr);
@@ -5477,10 +5497,10 @@ void HGraphBuilder::HandlePolymorphicStoreNamedField(Assignment* expr,
// unoptimized code).
if (instr->HasObservableSideEffects()) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->id());
} else {
Push(value);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->id());
Drop(1);
}
}
@@ -5523,7 +5543,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Handle<JSFunction> setter;
Handle<JSObject> holder;
if (LookupSetter(map, name, &setter, &holder)) {
- AddCheckConstantFunction(holder, object, map);
+ AddCheckConstantFunction(holder, object, map, true);
if (FLAG_inline_accessors && TryInlineSetter(setter, expr, value)) {
return;
}
@@ -5550,9 +5570,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
Push(value);
instr->set_position(expr->position());
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
} else {
@@ -5569,7 +5587,7 @@ void HGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
&has_side_effects);
Push(value);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
}
@@ -5591,9 +5609,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
new(zone()) HStoreGlobalCell(value, cell, lookup.GetPropertyDetails());
instr->set_position(position);
AddInstruction(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
} else {
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -5607,7 +5623,7 @@ void HGraphBuilder::HandleGlobalVariableAssignment(Variable* var,
instr->set_position(position);
AddInstruction(instr);
ASSERT(instr->HasObservableSideEffects());
- AddSimulate(ast_id, REMOVABLE_SIMULATE);
+ if (instr->HasObservableSideEffects()) AddSimulate(ast_id);
}
}
@@ -5684,7 +5700,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
new(zone()) HStoreContextSlot(context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
}
break;
}
@@ -5724,9 +5740,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
- }
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
HValue* right = Pop();
@@ -5734,9 +5748,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
HInstruction* store;
if (!monomorphic) {
@@ -5758,9 +5770,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
// Drop the simulated receiver and value. Return the value.
Drop(2);
Push(instr);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
} else {
@@ -5776,7 +5786,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ if (has_side_effects) AddSimulate(prop->LoadId());
CHECK_ALIVE(VisitForValue(expr->value()));
@@ -5785,9 +5795,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
HInstruction* instr = BuildBinaryOperation(operation, left, right);
PushAndAdd(instr);
- if (instr->HasObservableSideEffects()) {
- AddSimulate(operation->id(), REMOVABLE_SIMULATE);
- }
+ if (instr->HasObservableSideEffects()) AddSimulate(operation->id());
expr->RecordTypeFeedback(oracle(), zone());
HandleKeyedElementAccess(obj, key, instr, expr, expr->AssignmentId(),
@@ -5799,7 +5807,7 @@ void HGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
Drop(3);
Push(instr);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
return ast_context()->ReturnValue(Pop());
}
@@ -5922,7 +5930,7 @@ void HGraphBuilder::VisitAssignment(Assignment* expr) {
context, var->index(), mode, Top());
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
}
return ast_context()->ReturnValue(Pop());
}
@@ -5959,7 +5967,13 @@ void HGraphBuilder::VisitThrow(Throw* expr) {
HLoadNamedField* HGraphBuilder::BuildLoadNamedField(HValue* object,
Handle<Map> map,
- LookupResult* lookup) {
+ LookupResult* lookup,
+ bool smi_and_map_check) {
+ if (smi_and_map_check) {
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
+ }
+
int index = lookup->GetLocalFieldIndexFromMap(*map);
if (index < 0) {
// Negative property indices are in-object properties, indexed
@@ -5990,7 +6004,7 @@ HInstruction* HGraphBuilder::BuildCallGetter(HValue* object,
Handle<Map> map,
Handle<JSFunction> getter,
Handle<JSObject> holder) {
- AddCheckConstantFunction(holder, object, map);
+ AddCheckConstantFunction(holder, object, map, true);
AddInstruction(new(zone()) HPushArgument(object));
return new(zone()) HCallConstantFunction(getter, 1);
}
@@ -6005,29 +6019,17 @@ HInstruction* HGraphBuilder::BuildLoadNamedMonomorphic(HValue* object,
LookupResult lookup(isolate());
map->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsField()) {
- AddCheckMapsWithTransitions(object, map);
- return BuildLoadNamedField(object, map, &lookup);
+ return BuildLoadNamedField(object, map, &lookup, true);
}
// Handle a load of a constant known function.
if (lookup.IsConstantFunction()) {
- AddCheckMapsWithTransitions(object, map);
+ AddInstruction(new(zone()) HCheckNonSmi(object));
+ AddInstruction(HCheckMaps::NewWithTransitions(object, map, zone()));
Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*map));
return new(zone()) HConstant(function, Representation::Tagged());
}
- // Handle a load from a known field somewhere in the protoype chain.
- LookupInPrototypes(map, name, &lookup);
- if (lookup.IsField()) {
- Handle<JSObject> prototype(JSObject::cast(map->prototype()));
- Handle<JSObject> holder(lookup.holder());
- Handle<Map> holder_map(holder->map());
- AddCheckMapsWithTransitions(object, map);
- HInstruction* holder_value =
- AddInstruction(new(zone()) HCheckPrototypeMaps(prototype, holder));
- return BuildLoadNamedField(holder_value, holder_map, &lookup);
- }
-
// No luck, do a generic load.
return BuildLoadNamedGeneric(object, name, expr);
}
@@ -6060,6 +6062,13 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS: {
+ if (!val->representation().IsInteger32()) {
+ val = AddInstruction(new(zone()) HChange(
+ val,
+ Representation::Integer32(),
+ true, // Truncate to int32.
+ false)); // Don't deoptimize undefined (irrelevant here).
+ }
break;
}
case EXTERNAL_FLOAT_ELEMENTS:
@@ -6076,15 +6085,13 @@ HInstruction* HGraphBuilder::BuildExternalArrayElementAccess(
UNREACHABLE();
break;
}
- return new(zone()) HStoreKeyed(external_elements,
- checked_key,
- val,
- elements_kind);
+ return new(zone()) HStoreKeyedSpecializedArrayElement(
+ external_elements, checked_key, val, elements_kind);
} else {
ASSERT(val == NULL);
- HLoadKeyed* load =
- new(zone()) HLoadKeyed(
- external_elements, checked_key, dependency, elements_kind);
+ HLoadKeyedSpecializedArrayElement* load =
+ new(zone()) HLoadKeyedSpecializedArrayElement(
+ external_elements, checked_key, dependency, elements_kind);
if (FLAG_opt_safe_uint32_operations &&
elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
graph()->RecordUint32Instruction(load);
@@ -6103,6 +6110,10 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
if (is_store) {
ASSERT(val != NULL);
switch (elements_kind) {
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ return new(zone()) HStoreKeyedFastDoubleElement(
+ elements, checked_key, val);
case FAST_SMI_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
// Smi-only arrays need a smi check.
@@ -6110,9 +6121,7 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
// Fall through.
case FAST_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- return new(zone()) HStoreKeyed(
+ return new(zone()) HStoreKeyedFastElement(
elements, checked_key, val, elements_kind);
default:
UNREACHABLE();
@@ -6120,10 +6129,16 @@ HInstruction* HGraphBuilder::BuildFastElementAccess(HValue* elements,
}
}
// It's an element load (!is_store).
- return new(zone()) HLoadKeyed(elements,
- checked_key,
- load_dependency,
- elements_kind);
+ HoleCheckMode mode = IsFastPackedElementsKind(elements_kind) ?
+ OMIT_HOLE_CHECK :
+ PERFORM_HOLE_CHECK;
+ if (IsFastDoubleElementsKind(elements_kind)) {
+ return new(zone()) HLoadKeyedFastDoubleElement(elements, checked_key,
+ load_dependency, mode);
+ } else { // Smi or Object elements.
+ return new(zone()) HLoadKeyedFastElement(elements, checked_key,
+ load_dependency, elements_kind);
+ }
}
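
The load tail above dispatches on two properties of the elements kind: double kinds get the dedicated double-load instruction, and packed kinds may omit the hole check that holey kinds require. A compact sketch of the dispatch with abbreviated kind names:

enum FastKindSketch { kPackedSmi, kHoleySmi, kPackedDouble, kHoleyDouble,
                      kPackedObject, kHoleyObject };

bool IsPackedKind(FastKindSketch k) {
  return k == kPackedSmi || k == kPackedDouble || k == kPackedObject;
}
bool IsDoubleKind(FastKindSketch k) {
  return k == kPackedDouble || k == kHoleyDouble;
}

// Mirrors the tail of BuildFastElementAccess.
const char* PickLoad(FastKindSketch k) {
  if (IsDoubleKind(k)) {
    return IsPackedKind(k) ? "double load, OMIT_HOLE_CHECK"
                           : "double load, PERFORM_HOLE_CHECK";
  }
  return "tagged/smi load";
}
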
@@ -6350,6 +6365,7 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
HInstruction* elements_kind_instr =
AddInstruction(new(zone()) HElementsKind(object));
+ HCompareConstantEqAndBranch* elements_kind_branch = NULL;
HInstruction* elements =
AddInstruction(new(zone()) HLoadElements(object, checkspec));
HLoadExternalArrayPointer* external_elements = NULL;
@@ -6380,9 +6396,8 @@ HValue* HGraphBuilder::HandlePolymorphicElementAccess(HValue* object,
if (type_todo[elements_kind]) {
HBasicBlock* if_true = graph()->CreateBasicBlock();
HBasicBlock* if_false = graph()->CreateBasicBlock();
- HCompareConstantEqAndBranch* elements_kind_branch =
- new(zone()) HCompareConstantEqAndBranch(
- elements_kind_instr, elements_kind, Token::EQ_STRICT);
+ elements_kind_branch = new(zone()) HCompareConstantEqAndBranch(
+ elements_kind_instr, elements_kind, Token::EQ_STRICT);
elements_kind_branch->SetSuccessorAt(0, if_true);
elements_kind_branch->SetSuccessorAt(1, if_false);
current_block()->Finish(elements_kind_branch);
@@ -6654,7 +6669,7 @@ void HGraphBuilder::VisitProperty(Property* expr) {
Handle<JSFunction> getter;
Handle<JSObject> holder;
if (LookupGetter(map, name, &getter, &holder)) {
- AddCheckConstantFunction(holder, Top(), map);
+ AddCheckConstantFunction(holder, Top(), map, true);
if (FLAG_inline_accessors && TryInlineGetter(getter, expr)) return;
AddInstruction(new(zone()) HPushArgument(Pop()));
instr = new(zone()) HCallConstantFunction(getter, 1);
@@ -6680,10 +6695,10 @@ void HGraphBuilder::VisitProperty(Property* expr) {
&has_side_effects);
if (has_side_effects) {
if (ast_context()->IsEffect()) {
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->id());
} else {
Push(load);
- AddSimulate(expr->id(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->id());
Drop(1);
}
}
@@ -6694,23 +6709,22 @@ void HGraphBuilder::VisitProperty(Property* expr) {
}
-void HGraphBuilder::AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map) {
- if (!holder.is_null()) {
- AddInstruction(new(zone()) HCheckPrototypeMaps(
- Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
- }
-}
-
-
void HGraphBuilder::AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
- Handle<Map> receiver_map) {
+ Handle<Map> receiver_map,
+ bool smi_and_map_check) {
// Constant functions have the nice property that the map will change if they
// are overwritten. Therefore it is enough to check the map of the holder and
// its prototypes.
- AddCheckMapsWithTransitions(receiver, receiver_map);
- AddCheckPrototypeMaps(holder, receiver_map);
+ if (smi_and_map_check) {
+ AddInstruction(new(zone()) HCheckNonSmi(receiver));
+ AddInstruction(HCheckMaps::NewWithTransitions(receiver, receiver_map,
+ zone()));
+ }
+ if (!holder.is_null()) {
+ AddInstruction(new(zone()) HCheckPrototypeMaps(
+ Handle<JSObject>(JSObject::cast(receiver_map->prototype())), holder));
+ }
}
@@ -6792,7 +6806,7 @@ void HGraphBuilder::HandlePolymorphicCallNamed(Call* expr,
set_current_block(if_true);
expr->ComputeTarget(map, name);
- AddCheckPrototypeMaps(expr->holder(), map);
+ AddCheckConstantFunction(expr->holder(), receiver, map, false);
if (FLAG_trace_inlining && FLAG_polymorphic_inlining) {
Handle<JSFunction> caller = info()->closure();
SmartArrayPointer<char> caller_name =
@@ -7138,8 +7152,9 @@ bool HGraphBuilder::TryInline(CallKind call_kind,
inlined_count_ += nodes_added;
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+ Handle<Object> maybe_type_info(unoptimized_code->type_feedback_info());
Handle<TypeFeedbackInfo> type_info(
- TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
+ Handle<TypeFeedbackInfo>::cast(maybe_type_info));
graph()->update_type_change_checksum(type_info->own_type_change_checksum());
TraceInline(target, caller, NULL);
@@ -7277,9 +7292,6 @@ bool HGraphBuilder::TryInlineBuiltinFunctionCall(Call* expr, bool drop_extra) {
if (!expr->target()->shared()->HasBuiltinFunctionId()) return false;
BuiltinFunctionId id = expr->target()->shared()->builtin_function_id();
switch (id) {
- case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
case kMathRound:
case kMathAbs:
case kMathSqrt:
@@ -7340,9 +7352,6 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
return true;
}
break;
- case kMathExp:
- if (!FLAG_fast_math) break;
- // Fall through if FLAG_fast_math.
case kMathRound:
case kMathFloor:
case kMathAbs:
@@ -7352,7 +7361,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathCos:
case kMathTan:
if (argument_count == 2 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* argument = Pop();
HValue* context = environment()->LookupContext();
Drop(1); // Receiver.
@@ -7365,7 +7374,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathPow:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
Pop(); // Pop receiver.
@@ -7407,7 +7416,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
break;
case kMathRandom:
if (argument_count == 1 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
Drop(1); // Receiver.
HValue* context = environment()->LookupContext();
HGlobalObject* global_object = new(zone()) HGlobalObject(context);
@@ -7420,7 +7429,7 @@ bool HGraphBuilder::TryInlineBuiltinMethodCall(Call* expr,
case kMathMax:
case kMathMin:
if (argument_count == 3 && check_type == RECEIVER_MAP_CHECK) {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
HValue* right = Pop();
HValue* left = Pop();
Drop(1); // Receiver.
@@ -7469,7 +7478,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
VisitForValue(prop->obj());
if (HasStackOverflow() || current_block() == NULL) return true;
HValue* function = Top();
- AddCheckConstantFunction(expr->holder(), function, function_map);
+ AddCheckConstantFunction(expr->holder(), function, function_map, true);
Drop(1);
VisitForValue(args->at(0));
@@ -7493,10 +7502,7 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
return true;
} else {
    // We are inside an inlined function and we know exactly what is inside the
- // arguments object. But we need to be able to materialize at deopt.
- // TODO(mstarzinger): For now we just ensure arguments are pushed
- // right after HEnterInlined, but we could be smarter about this.
- EnsureArgumentsArePushedForAccess();
+ // arguments object.
HValue* context = environment()->LookupContext();
HValue* wrapped_receiver =
@@ -7522,55 +7528,6 @@ bool HGraphBuilder::TryCallApply(Call* expr) {
}
-// Checks whether all maps in |types| belong to the same family, i.e., are
-// elements-kind transitions of one another. Returns NULL if they are not from
-// the same family, or a Map* for the map whose elements kind comes first in
-// the family's transition sequence.
-static Map* CheckSameElementsFamily(SmallMapList* types) {
- if (types->length() <= 1) return NULL;
- // Check if all maps belong to the same transition family.
- Map* kinds[kFastElementsKindCount];
- Map* first_map = *types->first();
- ElementsKind first_kind = first_map->elements_kind();
- if (!IsFastElementsKind(first_kind)) return NULL;
- int first_index = GetSequenceIndexFromFastElementsKind(first_kind);
- int last_index = first_index;
-
- for (int i = 0; i < kFastElementsKindCount; i++) kinds[i] = NULL;
-
- kinds[first_index] = first_map;
-
- for (int i = 1; i < types->length(); ++i) {
- Map* map = *types->at(i);
- ElementsKind elements_kind = map->elements_kind();
- if (!IsFastElementsKind(elements_kind)) return NULL;
- int index = GetSequenceIndexFromFastElementsKind(elements_kind);
- if (index < first_index) {
- first_index = index;
- } else if (index > last_index) {
- last_index = index;
- } else if (kinds[index] != map) {
- return NULL;
- }
- kinds[index] = map;
- }
-
- Map* current = kinds[first_index];
- for (int i = first_index + 1; i <= last_index; i++) {
- Map* next = kinds[i];
- if (next != NULL) {
- ElementsKind current_kind = next->elements_kind();
- if (next != current->LookupElementsTransitionMap(current_kind)) {
- return NULL;
- }
- current = next;
- }
- }
-
- return kinds[first_index];
-}
-
-
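The removed CheckSameElementsFamily guarded the old polymorphic-call path: it only returned a map when every receiver map sat on one fast-elements transition chain. A toy sketch of that invariant, using hypothetical stand-in types rather than V8's real Map class (plain C++, illustration only):

#include <cstddef>

// Hypothetical stand-in for the map/transition machinery the helper walked.
struct Map {
  int elements_kind;       // position in the fast-elements transition order
  Map* transition_target;  // stand-in for LookupElementsTransitionMap()
};

// Returns the map with the lowest elements kind when every map in |maps|
// is reachable from it by following transitions; otherwise returns NULL.
static Map* CheckSameFamily(Map** maps, int count) {
  if (count <= 1) return NULL;
  Map* first = maps[0];
  for (int i = 1; i < count; ++i) {
    if (maps[i]->elements_kind < first->elements_kind) first = maps[i];
  }
  for (int i = 0; i < count; ++i) {
    Map* m = first;
    while (m != NULL && m != maps[i]) m = m->transition_target;
    if (m == NULL) return NULL;  // not on the chain: different family
  }
  return first;
}

int main() {
  Map obj = { 2, NULL };
  Map dbl = { 1, &obj };
  Map smi = { 0, &dbl };
  Map* maps[] = { &dbl, &smi, &obj };
  return CheckSameFamily(maps, 3) == &smi ? 0 : 1;  // smi starts the chain
}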
void HGraphBuilder::VisitCall(Call* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -7610,25 +7567,15 @@ void HGraphBuilder::VisitCall(Call* expr) {
CHECK_ALIVE(VisitExpressions(expr->arguments()));
Handle<String> name = prop->key()->AsLiteral()->AsPropertyName();
- SmallMapList* types = expr->GetReceiverTypes();
- bool monomorphic = expr->IsMonomorphic();
- Handle<Map> receiver_map;
- if (monomorphic) {
- receiver_map = (types == NULL || types->is_empty())
- ? Handle<Map>::null()
- : types->first();
- } else {
- Map* family_map = CheckSameElementsFamily(types);
- if (family_map != NULL) {
- receiver_map = Handle<Map>(family_map);
- monomorphic = expr->ComputeTarget(receiver_map, name);
- }
- }
+ SmallMapList* types = expr->GetReceiverTypes();
HValue* receiver =
environment()->ExpressionStackAt(expr->arguments()->length());
- if (monomorphic) {
+ if (expr->IsMonomorphic()) {
+ Handle<Map> receiver_map = (types == NULL || types->is_empty())
+ ? Handle<Map>::null()
+ : types->first();
if (TryInlineBuiltinMethodCall(expr,
receiver,
receiver_map,
@@ -7650,7 +7597,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
call = PreProcessCall(
new(zone()) HCallNamed(context, name, argument_count));
} else {
- AddCheckConstantFunction(expr->holder(), receiver, receiver_map);
+ AddCheckConstantFunction(expr->holder(), receiver, receiver_map, true);
if (TryInlineCall(expr)) return;
call = PreProcessCall(
@@ -7673,7 +7620,7 @@ void HGraphBuilder::VisitCall(Call* expr) {
VariableProxy* proxy = expr->expression()->AsVariableProxy();
bool global_call = proxy != NULL && proxy->var()->IsUnallocated();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
return Bailout("possible direct call to eval");
}
@@ -7999,13 +7946,14 @@ void HGraphBuilder::VisitSub(UnaryOperation* expr) {
HInstruction* instr =
new(zone()) HMul(context, value, graph_->GetConstantMinus1());
TypeInfo info = oracle()->UnaryType(expr);
- Representation rep = ToRepresentation(info);
if (info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
info = TypeInfo::Unknown();
}
- HBinaryOperation::cast(instr)->set_observed_input_representation(rep, rep);
+ Representation rep = ToRepresentation(info);
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
return ast_context()->ReturnInstruction(instr, expr->id());
}
@@ -8094,11 +8042,8 @@ HInstruction* HGraphBuilder::BuildIncrement(bool returns_original_input,
: graph_->GetConstantMinus1();
HValue* context = environment()->LookupContext();
HInstruction* instr = new(zone()) HAdd(context, Top(), delta);
-  // We can't insert a simulate here, because it would break deoptimization;
-  // the HAdd therefore must not have side effects, and we must freeze its
-  // representation.
+ TraceRepresentation(expr->op(), info, instr, rep);
instr->AssumeRepresentation(rep);
- instr->ClearAllSideEffects();
AddInstruction(instr);
return instr;
}
@@ -8172,7 +8117,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
new(zone()) HStoreContextSlot(context, var->index(), mode, after);
AddInstruction(instr);
if (instr->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
}
break;
}
@@ -8213,9 +8158,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
load = BuildLoadNamedGeneric(object, name, prop);
}
PushAndAdd(load);
- if (load->HasObservableSideEffects()) {
- AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
- }
+ if (load->HasObservableSideEffects()) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -8243,9 +8186,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
// necessary.
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
- if (store->HasObservableSideEffects()) {
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
- }
+ if (store->HasObservableSideEffects()) AddSimulate(expr->AssignmentId());
} else {
// Keyed property.
@@ -8262,7 +8203,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
false, // is_store
&has_side_effects);
Push(load);
- if (has_side_effects) AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
+ if (has_side_effects) AddSimulate(prop->LoadId());
after = BuildIncrement(returns_original_input, expr);
input = Pop();
@@ -8280,7 +8221,7 @@ void HGraphBuilder::VisitCountOperation(CountOperation* expr) {
environment()->SetExpressionStackAt(0, after);
if (returns_original_input) environment()->SetExpressionStackAt(1, input);
ASSERT(has_side_effects); // Stores always have side effects.
- AddSimulate(expr->AssignmentId(), REMOVABLE_SIMULATE);
+ AddSimulate(expr->AssignmentId());
}
}
@@ -8301,84 +8242,21 @@ HStringCharCodeAt* HGraphBuilder::BuildStringCharCodeAt(HValue* context,
return new(zone()) HStringCharCodeAt(context, string, checked_index);
}
-// Checks whether the given shift amounts have the form (sa) and (32 - sa).
-static bool ShiftAmountsAllowReplaceByRotate(HValue* sa,
- HValue* const32_minus_sa) {
- if (!const32_minus_sa->IsSub()) return false;
- HSub* sub = HSub::cast(const32_minus_sa);
- if (sa != sub->right()) return false;
- HValue* const32 = sub->left();
- if (!const32->IsConstant() ||
- HConstant::cast(const32)->Integer32Value() != 32) {
- return false;
- }
- return (sub->right() == sa);
-}
-
-
-// Checks whether the left and the right operands are shift instructions in
-// opposite directions that together can be replaced by a single rotate-right
-// instruction. If so, returns the operand and the shift amount for the rotate
-// instruction.
-bool HGraphBuilder::MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount) {
- HShl* shl;
- HShr* shr;
- if (left->IsShl() && right->IsShr()) {
- shl = HShl::cast(left);
- shr = HShr::cast(right);
- } else if (left->IsShr() && right->IsShl()) {
- shl = HShl::cast(right);
- shr = HShr::cast(left);
- } else {
- return false;
- }
- if (shl->left() != shr->left()) return false;
-
- if (!ShiftAmountsAllowReplaceByRotate(shl->right(), shr->right()) &&
- !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
- return false;
- }
-  *operand = shr->left();
- *shift_amount = shr->right();
- return true;
-}
-
-
-bool CanBeZero(HValue* right) {
- if (right->IsConstant()) {
- HConstant* right_const = HConstant::cast(right);
- if (right_const->HasInteger32Value() &&
- (right_const->Integer32Value() & 0x1f) != 0) {
- return false;
- }
- }
- return true;
-}
-
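The removed rotate matcher relied on a bit-twiddling identity: for 32-bit values, (x << sa) | (x >> (32 - sa)) is a rotation, with the shr amount serving as the rotate-right count. A minimal standalone check of that identity (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

// Rotate right by n, masking the count to 5 bits as the ia32 ror does.
static uint32_t RotateRight(uint32_t x, uint32_t n) {
  n &= 31;
  return (x >> n) | (x << ((32 - n) & 31));
}

int main() {
  uint32_t x = 0xDEADBEEF;
  for (uint32_t sa = 1; sa < 32; ++sa) {
    // The matched pattern: a shl by sa OR'ed with a shr by (32 - sa).
    uint32_t shifted = (x << sa) | (x >> (32 - sa));
    // MatchRotateRight reported the shr amount, i.e. (32 - sa), as the
    // shift amount, which makes the pair exactly a rotate right.
    assert(shifted == RotateRight(x, 32 - sa));
  }
  return 0;
}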
HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
HValue* left,
HValue* right) {
HValue* context = environment()->LookupContext();
- TypeInfo left_info, right_info, result_info, combined_info;
- oracle()->BinaryType(expr, &left_info, &right_info, &result_info);
- Representation left_rep = ToRepresentation(left_info);
- Representation right_rep = ToRepresentation(right_info);
- Representation result_rep = ToRepresentation(result_info);
- if (left_info.IsUninitialized()) {
- // Can't have initialized one but not the other.
- ASSERT(right_info.IsUninitialized());
+ TypeInfo info = oracle()->BinaryType(expr);
+ if (info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- left_info = right_info = TypeInfo::Unknown();
+ info = TypeInfo::Unknown();
}
HInstruction* instr = NULL;
switch (expr->op()) {
case Token::ADD:
- if (left_info.IsString() && right_info.IsString()) {
+ if (info.IsString()) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsString(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
@@ -8402,26 +8280,25 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
break;
case Token::BIT_XOR:
case Token::BIT_AND:
+ case Token::BIT_OR:
instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
break;
- case Token::BIT_OR: {
- HValue* operand, *shift_amount;
- if (left_info.IsInteger32() && right_info.IsInteger32() &&
- MatchRotateRight(left, right, &operand, &shift_amount)) {
- instr = new(zone()) HRor(context, operand, shift_amount);
- } else {
- instr = HBitwise::NewHBitwise(zone(), expr->op(), context, left, right);
- }
- break;
- }
case Token::SAR:
instr = HSar::NewHSar(zone(), context, left, right);
break;
case Token::SHR:
instr = HShr::NewHShr(zone(), context, left, right);
- if (FLAG_opt_safe_uint32_operations && instr->IsShr() &&
- CanBeZero(right)) {
- graph()->RecordUint32Instruction(instr);
+ if (FLAG_opt_safe_uint32_operations && instr->IsShr()) {
+ bool can_be_shift_by_zero = true;
+ if (right->IsConstant()) {
+ HConstant* right_const = HConstant::cast(right);
+ if (right_const->HasInteger32Value() &&
+ (right_const->Integer32Value() & 0x1f) != 0) {
+ can_be_shift_by_zero = false;
+ }
+ }
+
+ if (can_be_shift_by_zero) graph()->RecordUint32Instruction(instr);
}
break;
case Token::SHL:
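The inlined check above preserves CanBeZero's reasoning: shift counts are masked to five bits, so a count that can be 0 mod 32 means `x >>> 0`, which reinterprets the operand as an unsigned 32-bit value that may not fit in an int32; only then must the graph record the instruction for uint32 analysis. A small standalone illustration (plain C++, not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  int32_t x = -1;
  // JS `x >>> 0`: the operand is reinterpreted as unsigned and shifted by
  // 0 & 0x1f == 0 bits, so the sign bit survives and the result
  // (4294967295) does not fit in an int32.
  printf("%u\n", static_cast<uint32_t>(x) >> 0);
  // Any constant count with (count & 0x1f) != 0 discards the sign bit, so
  // the result always fits in an int32 and needs no uint32 tracking.
  printf("%u\n", static_cast<uint32_t>(x) >> 1);  // 2147483647
  return 0;
}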
@@ -8431,11 +8308,23 @@ HInstruction* HGraphBuilder::BuildBinaryOperation(BinaryOperation* expr,
UNREACHABLE();
}
- if (instr->IsBinaryOperation()) {
- HBinaryOperation* binop = HBinaryOperation::cast(instr);
- binop->set_observed_input_representation(left_rep, right_rep);
- binop->initialize_output_representation(result_rep);
+  // If we hit an uninitialized binary op stub, we will get type info
+  // for a smi operation. If one of the operands is a constant string,
+  // do not generate code assuming it is a smi operation.
+ if (info.IsSmi() &&
+ ((left->IsConstant() && HConstant::cast(left)->handle()->IsString()) ||
+ (right->IsConstant() && HConstant::cast(right)->handle()->IsString()))) {
+ return instr;
+ }
+ Representation rep = ToRepresentation(info);
+ // We only generate either int32 or generic tagged bitwise operations.
+ if (instr->IsBitwiseBinaryOperation()) {
+ HBitwiseBinaryOperation::cast(instr)->
+ InitializeObservedInputRepresentation(rep);
+ if (rep.IsDouble()) rep = Representation::Integer32();
}
+ TraceRepresentation(expr->op(), info, instr, rep);
+ instr->AssumeRepresentation(rep);
return instr;
}
@@ -8579,8 +8468,27 @@ void HGraphBuilder::VisitArithmeticExpression(BinaryOperation* expr) {
}
+void HGraphBuilder::TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep) {
+ if (!FLAG_trace_representation) return;
+ // TODO(svenpanne) Under which circumstances are we actually not flexible?
+ // At first glance, this looks a bit weird...
+ bool flexible = value->CheckFlag(HValue::kFlexibleRepresentation);
+ PrintF("Operation %s has type info %s, %schange representation assumption "
+ "for %s (ID %d) from %s to %s\n",
+ Token::Name(op),
+ info.ToString(),
+ flexible ? "" : " DO NOT ",
+ value->Mnemonic(),
+ graph_->GetMaximumValueID(),
+ value->representation().Mnemonic(),
+ rep.Mnemonic());
+}
+
+
Representation HGraphBuilder::ToRepresentation(TypeInfo info) {
- if (info.IsUninitialized()) return Representation::None();
if (info.IsSmi()) return Representation::Integer32();
if (info.IsInteger32()) return Representation::Integer32();
if (info.IsDouble()) return Representation::Double();
@@ -8678,17 +8586,13 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
return ast_context()->ReturnControl(instr, expr->id());
}
- TypeInfo left_type, right_type, overall_type_info;
- oracle()->CompareType(expr, &left_type, &right_type, &overall_type_info);
- Representation combined_rep = ToRepresentation(overall_type_info);
- Representation left_rep = ToRepresentation(left_type);
- Representation right_rep = ToRepresentation(right_type);
+ TypeInfo type_info = oracle()->CompareType(expr);
// Check if this expression was ever executed according to type feedback.
// Note that for the special typeof/null/undefined cases we get unknown here.
- if (overall_type_info.IsUninitialized()) {
+ if (type_info.IsUninitialized()) {
AddInstruction(new(zone()) HSoftDeoptimize);
current_block()->MarkAsDeoptimizing();
- overall_type_info = left_type = right_type = TypeInfo::Unknown();
+ type_info = TypeInfo::Unknown();
}
CHECK_ALIVE(VisitForValue(expr->left()));
@@ -8760,15 +8664,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
HIn* result = new(zone()) HIn(context, left, right);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
- } else if (overall_type_info.IsNonPrimitive()) {
+ } else if (type_info.IsNonPrimitive()) {
switch (op) {
case Token::EQ:
case Token::EQ_STRICT: {
// Can we get away with map check and not instance type check?
Handle<Map> map = oracle()->GetCompareMap(expr);
if (!map.is_null()) {
- AddCheckMapsWithTransitions(left, map);
- AddCheckMapsWithTransitions(right, map);
+ AddInstruction(new(zone()) HCheckNonSmi(left));
+ AddInstruction(HCheckMaps::NewWithTransitions(left, map, zone()));
+ AddInstruction(new(zone()) HCheckNonSmi(right));
+ AddInstruction(HCheckMaps::NewWithTransitions(right, map, zone()));
HCompareObjectEqAndBranch* result =
new(zone()) HCompareObjectEqAndBranch(left, right);
result->set_position(expr->position());
@@ -8787,7 +8693,8 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
default:
return Bailout("Unsupported non-primitive compare");
}
- } else if (overall_type_info.IsSymbol() && Token::IsEqualityOp(op)) {
+ } else if (type_info.IsString() && oracle()->IsSymbolCompare(expr) &&
+ (op == Token::EQ || op == Token::EQ_STRICT)) {
AddInstruction(new(zone()) HCheckNonSmi(left));
AddInstruction(HCheckInstanceType::NewIsSymbol(left, zone()));
AddInstruction(new(zone()) HCheckNonSmi(right));
@@ -8797,17 +8704,17 @@ void HGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
result->set_position(expr->position());
return ast_context()->ReturnControl(result, expr->id());
} else {
- if (combined_rep.IsTagged() || combined_rep.IsNone()) {
+ Representation r = ToRepresentation(type_info);
+ if (r.IsTagged()) {
HCompareGeneric* result =
new(zone()) HCompareGeneric(context, left, right, op);
- result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
return ast_context()->ReturnInstruction(result, expr->id());
} else {
HCompareIDAndBranch* result =
new(zone()) HCompareIDAndBranch(left, right, op);
- result->set_observed_input_representation(left_rep, right_rep);
result->set_position(expr->position());
+ result->SetInputRepresentation(r);
return ast_context()->ReturnControl(result, expr->id());
}
}
@@ -8894,9 +8801,7 @@ void HGraphBuilder::VisitVariableDeclaration(VariableDeclaration* declaration) {
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
- }
+ if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
}
break;
case Variable::LOOKUP:
@@ -8932,9 +8837,7 @@ void HGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* declaration) {
HStoreContextSlot* store = new(zone()) HStoreContextSlot(
context, variable->index(), HStoreContextSlot::kNoCheck, value);
AddInstruction(store);
- if (store->HasObservableSideEffects()) {
- AddSimulate(proxy->id(), REMOVABLE_SIMULATE);
- }
+ if (store->HasObservableSideEffects()) AddSimulate(proxy->id());
break;
}
case Variable::LOOKUP:
@@ -8978,11 +8881,6 @@ void HGraphBuilder::VisitModuleUrl(ModuleUrl* module) {
}
-void HGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
- UNREACHABLE();
-}
-
-
// Generators for inline runtime functions.
// Support for types.
void HGraphBuilder::GenerateIsSmi(CallRuntime* call) {
@@ -9154,39 +9052,6 @@ void HGraphBuilder::GenerateDateField(CallRuntime* call) {
}
-void HGraphBuilder::GenerateOneByteSeqStringSetChar(
- CallRuntime* call) {
- ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* value = Pop();
- HValue* index = Pop();
- HValue* string = Pop();
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::ONE_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
-void HGraphBuilder::GenerateTwoByteSeqStringSetChar(
- CallRuntime* call) {
- ASSERT(call->arguments()->length() == 3);
- CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
- CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
- HValue* value = Pop();
- HValue* index = Pop();
- HValue* string = Pop();
- HValue* context = environment()->LookupContext();
- HStringCharCodeAt* char_code = BuildStringCharCodeAt(context, string, index);
- AddInstruction(char_code);
- HSeqStringSetChar* result = new(zone()) HSeqStringSetChar(
- String::TWO_BYTE_ENCODING, string, index, value);
- return ast_context()->ReturnInstruction(result, call->id());
-}
-
-
void HGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
ASSERT(call->arguments()->length() == 2);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -10032,43 +9897,28 @@ void HStatistics::Print() {
double size_percent = static_cast<double>(size) * 100 / total_size_;
PrintF(" %8u bytes / %4.1f %%\n", size, size_percent);
}
-
- PrintF("---------------------------------------------------------------\n");
- int64_t total = create_graph_ + optimize_graph_ + generate_code_;
- PrintF("%30s - %7.3f ms / %4.1f %% \n",
- "Create graph",
- static_cast<double>(create_graph_) / 1000,
- static_cast<double>(create_graph_) * 100 / total);
- PrintF("%30s - %7.3f ms / %4.1f %% \n",
- "Optimize graph",
- static_cast<double>(optimize_graph_) / 1000,
- static_cast<double>(optimize_graph_) * 100 / total);
- PrintF("%30s - %7.3f ms / %4.1f %% \n",
- "Generate and install code",
- static_cast<double>(generate_code_) / 1000,
- static_cast<double>(generate_code_) * 100 / total);
- PrintF("---------------------------------------------------------------\n");
- PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
- "Total",
- static_cast<double>(total) / 1000,
- static_cast<double>(total) / full_code_gen_);
-
double source_size_in_kb = static_cast<double>(source_size_) / 1024;
double normalized_time = source_size_in_kb > 0
- ? (static_cast<double>(total) / 1000) / source_size_in_kb
+ ? (static_cast<double>(sum) / 1000) / source_size_in_kb
: 0;
- double normalized_size_in_kb = source_size_in_kb > 0
- ? total_size_ / 1024 / source_size_in_kb
+ double normalized_bytes = source_size_in_kb > 0
+ ? total_size_ / source_size_in_kb
: 0;
- PrintF("%30s - %7.3f ms %7.3f kB allocated\n",
- "Average per kB source",
- normalized_time, normalized_size_in_kb);
+ PrintF("%30s - %7.3f ms %7.3f bytes\n", "Sum",
+ normalized_time, normalized_bytes);
+ PrintF("---------------------------------------------------------------\n");
+ PrintF("%30s - %7.3f ms (%.1f times slower than full code gen)\n",
+ "Total",
+ static_cast<double>(total_) / 1000,
+ static_cast<double>(total_) / full_code_gen_);
}
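For reference, the restored "Sum" line divides milliseconds (ticks are microseconds, hence sum / 1000) and allocated bytes by the source size in kB. A quick standalone check of that arithmetic with made-up numbers (plain C++):

#include <cstdio>

int main() {
  long long sum_ticks = 12000;  // hypothetical: 12 ms of compilation
  unsigned total_size = 48000;  // hypothetical: bytes allocated
  double source_bytes = 2500;   // hypothetical: bytes of JS source
  double kb = source_bytes / 1024;
  printf("%30s - %7.3f ms %7.3f bytes\n", "Sum",
         (sum_ticks / 1000.0) / kb,  // ~4.915 ms per kB of source
         total_size / kb);           // ~19660.8 bytes per kB of source
  return 0;
}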
void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
if (name == HPhase::kFullCodeGen) {
full_code_gen_ += ticks;
+ } else if (name == HPhase::kTotal) {
+ total_ += ticks;
} else {
total_size_ += size;
for (int i = 0; i < names_.length(); ++i) {
@@ -10086,6 +9936,8 @@ void HStatistics::SaveTiming(const char* name, int64_t ticks, unsigned size) {
const char* const HPhase::kFullCodeGen = "Full code generator";
+const char* const HPhase::kTotal = "Total";
+
void HPhase::Begin(const char* name,
HGraph* graph,
diff --git a/deps/v8/src/hydrogen.h b/deps/v8/src/hydrogen.h
index 98b05d147..a0d81497f 100644
--- a/deps/v8/src/hydrogen.h
+++ b/deps/v8/src/hydrogen.h
@@ -125,10 +125,7 @@ class HBasicBlock: public ZoneObject {
void Goto(HBasicBlock* block, FunctionState* state = NULL);
int PredecessorIndexOf(HBasicBlock* predecessor) const;
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE) {
- AddInstruction(CreateSimulate(ast_id, removable));
- }
+ void AddSimulate(BailoutId ast_id) { AddInstruction(CreateSimulate(ast_id)); }
void AssignCommonDominator(HBasicBlock* other);
void AssignLoopSuccessorDominators();
@@ -169,7 +166,7 @@ class HBasicBlock: public ZoneObject {
void RegisterPredecessor(HBasicBlock* pred);
void AddDominatedBlock(HBasicBlock* block);
- HSimulate* CreateSimulate(BailoutId ast_id, RemovableSimulate removable);
+ HSimulate* CreateSimulate(BailoutId ast_id);
HDeoptimize* CreateDeoptimize(HDeoptimize::UseEnvironment has_uses);
int block_id_;
@@ -258,7 +255,6 @@ class HGraph: public ZoneObject {
void InitializeInferredTypes();
void InsertTypeConversions();
- void MergeRemovableSimulates();
void InsertRepresentationChanges();
void MarkDeoptimizeOnUndefined();
void ComputeMinusZeroChecks();
@@ -274,7 +270,6 @@ class HGraph: public ZoneObject {
void DehoistSimpleArrayIndexComputations();
void DeadCodeElimination();
void PropagateDeoptimizingMark();
- void EliminateUnusedInstructions();
// Returns false if there are phi-uses of the arguments-object
// which are not supported by the optimizing compiler.
@@ -617,25 +612,6 @@ class HEnvironment: public ZoneObject {
};
-class HInferRepresentation BASE_EMBEDDED {
- public:
- explicit HInferRepresentation(HGraph* graph)
- : graph_(graph),
- worklist_(8, graph->zone()),
- in_worklist_(graph->GetMaximumValueID(), graph->zone()) { }
-
- void Analyze();
- void AddToWorklist(HValue* current);
-
- private:
- Zone* zone() const { return graph_->zone(); }
-
- HGraph* graph_;
- ZoneList<HValue*> worklist_;
- BitVector in_worklist_;
-};
-
-
class HGraphBuilder;
enum ArgumentsAllowedFlag {
@@ -903,8 +879,7 @@ class HGraphBuilder: public AstVisitor {
// Adding instructions.
HInstruction* AddInstruction(HInstruction* instr);
- void AddSimulate(BailoutId ast_id,
- RemovableSimulate removable = FIXED_SIMULATE);
+ void AddSimulate(BailoutId ast_id);
// Bailout environment manipulation.
void Push(HValue* value) { environment()->Push(value); }
@@ -1049,6 +1024,10 @@ class HGraphBuilder: public AstVisitor {
// to push them as outgoing parameters.
template <class Instruction> HInstruction* PreProcessCall(Instruction* call);
+ void TraceRepresentation(Token::Value op,
+ TypeInfo info,
+ HValue* value,
+ Representation rep);
static Representation ToRepresentation(TypeInfo info);
void SetUpScope(Scope* scope);
@@ -1185,7 +1164,8 @@ class HGraphBuilder: public AstVisitor {
HLoadNamedField* BuildLoadNamedField(HValue* object,
Handle<Map> map,
- LookupResult* result);
+ LookupResult* result,
+ bool smi_and_map_check);
HInstruction* BuildLoadNamedGeneric(HValue* object,
Handle<String> name,
Property* expr);
@@ -1206,14 +1186,12 @@ class HGraphBuilder: public AstVisitor {
ElementsKind elements_kind,
bool is_store);
- void AddCheckMapsWithTransitions(HValue* object,
- Handle<Map> map);
-
HInstruction* BuildStoreNamedField(HValue* object,
Handle<String> name,
HValue* value,
Handle<Map> map,
- LookupResult* lookup);
+ LookupResult* lookup,
+ bool smi_and_map_check);
HInstruction* BuildStoreNamedGeneric(HValue* object,
Handle<String> name,
HValue* value);
@@ -1234,17 +1212,10 @@ class HGraphBuilder: public AstVisitor {
HInstruction* BuildThisFunction();
- void AddCheckPrototypeMaps(Handle<JSObject> holder,
- Handle<Map> receiver_map);
-
void AddCheckConstantFunction(Handle<JSObject> holder,
HValue* receiver,
- Handle<Map> receiver_map);
-
- bool MatchRotateRight(HValue* left,
- HValue* right,
- HValue** operand,
- HValue** shift_amount);
+ Handle<Map> receiver_map,
+ bool smi_and_map_check);
Zone* zone() const { return zone_; }
@@ -1378,22 +1349,12 @@ class HStatistics: public Malloced {
return instance.get();
}
- void IncrementSubtotals(int64_t create_graph,
- int64_t optimize_graph,
- int64_t generate_code) {
- create_graph_ += create_graph;
- optimize_graph_ += optimize_graph;
- generate_code_ += generate_code;
- }
-
private:
HStatistics()
: timing_(5),
names_(5),
sizes_(5),
- create_graph_(0),
- optimize_graph_(0),
- generate_code_(0),
+ total_(0),
total_size_(0),
full_code_gen_(0),
source_size_(0) { }
@@ -1401,9 +1362,7 @@ class HStatistics: public Malloced {
List<int64_t> timing_;
List<const char*> names_;
List<unsigned> sizes_;
- int64_t create_graph_;
- int64_t optimize_graph_;
- int64_t generate_code_;
+ int64_t total_;
unsigned total_size_;
int64_t full_code_gen_;
double source_size_;
@@ -1413,6 +1372,7 @@ class HStatistics: public Malloced {
class HPhase BASE_EMBEDDED {
public:
static const char* const kFullCodeGen;
+ static const char* const kTotal;
explicit HPhase(const char* name) { Begin(name, NULL, NULL, NULL); }
HPhase(const char* name, HGraph* graph) {
diff --git a/deps/v8/src/ia32/assembler-ia32-inl.h b/deps/v8/src/ia32/assembler-ia32-inl.h
index 114f87842..7fdf50c7a 100644
--- a/deps/v8/src/ia32/assembler-ia32-inl.h
+++ b/deps/v8/src/ia32/assembler-ia32-inl.h
@@ -46,21 +46,12 @@ namespace v8 {
namespace internal {
-static const byte kCallOpcode = 0xE8;
-
-
// The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // Relocate entry.
CPU::FlushICache(p, sizeof(uint32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= delta; // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
} else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
// Special handling of js_return when a break point is set (call
// instruction has been inserted).
@@ -178,21 +169,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -230,7 +206,7 @@ Object** RelocInfo::call_object_address() {
bool RelocInfo::IsPatchedReturnSequence() {
- return *pc_ == kCallOpcode;
+ return *pc_ == 0xE8;
}
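The magic constant is the ia32 near-call opcode: a return sequence that has been patched for a break point begins with `call rel32`, encoded as 0xE8 followed by a 32-bit displacement. As a standalone sketch (plain C++, not V8 code):

#include <cstdint>

// A patched JS return sequence begins with a call rel32 instruction,
// i.e. the five bytes E8 xx xx xx xx encoding `call <pc + 5 + rel32>`.
static bool IsPatchedReturnSequence(const uint8_t* pc) {
  return pc[0] == 0xE8;  // call rel32 opcode
}

int main() {
  const uint8_t patched[] = { 0xE8, 0x10, 0x00, 0x00, 0x00 };  // call +0x10
  return IsPatchedReturnSequence(patched) ? 0 : 1;
}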
@@ -251,9 +227,7 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
- #ifdef ENABLE_DEBUGGER_SUPPORT
+#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
@@ -281,8 +255,6 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/deps/v8/src/ia32/assembler-ia32.cc b/deps/v8/src/ia32/assembler-ia32.cc
index dc20fefd6..f291b0526 100644
--- a/deps/v8/src/ia32/assembler-ia32.cc
+++ b/deps/v8/src/ia32/assembler-ia32.cc
@@ -169,7 +169,7 @@ void Displacement::init(Label* L, Type type) {
const int RelocInfo::kApplyMask =
RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY |
1 << RelocInfo::JS_RETURN | 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::DEBUG_BREAK_SLOT | 1 << RelocInfo::CODE_AGE_SEQUENCE;
+ 1 << RelocInfo::DEBUG_BREAK_SLOT;
bool RelocInfo::IsCodedSpecially() {
@@ -312,19 +312,48 @@ Register Operand::reg() const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
- positions_recorder_(this) {
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
// Clear the buffer in debug mode unless it was provided by the
  // caller, in which case we can't be sure it's okay to overwrite
// existing code in it; see CodePatcher::CodePatcher(...).
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
+ memset(buffer_, 0xCC, buffer_size); // int3
}
#endif
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ // Set up buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
@@ -332,6 +361,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
}
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
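The restored constructor/destructor pair implements a one-slot buffer cache: small requests reuse the isolate's spare buffer, and the destructor parks a minimal-size buffer back in that slot instead of freeing it, so back-to-back assemblers avoid an allocate/free pair. A simplified sketch of the pattern with hypothetical stand-in types (the real isolate releases the spare at teardown):

#include <cstddef>

struct Isolate {
  char* spare_buffer = nullptr;  // stand-in for assembler_spare_buffer()
};

class CodeBuffer {
 public:
  static const size_t kMinimalSize = 4 * 1024;  // cf. kMinimalBufferSize

  CodeBuffer(Isolate* isolate, size_t size)
      : isolate_(isolate), size_(size < kMinimalSize ? kMinimalSize : size) {
    if (size_ == kMinimalSize && isolate_->spare_buffer != nullptr) {
      data_ = isolate_->spare_buffer;  // reuse the cached buffer
      isolate_->spare_buffer = nullptr;
    } else {
      data_ = new char[size_];
    }
  }

  ~CodeBuffer() {
    if (size_ == kMinimalSize && isolate_->spare_buffer == nullptr) {
      isolate_->spare_buffer = data_;  // cache it for the next assembler
    } else {
      delete[] data_;
    }
  }

 private:
  Isolate* isolate_;
  size_t size_;
  char* data_;
};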
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -1023,25 +1064,6 @@ void Assembler::rcr(Register dst, uint8_t imm8) {
}
}
-void Assembler::ror(Register dst, uint8_t imm8) {
- EnsureSpace ensure_space(this);
- ASSERT(is_uint5(imm8)); // illegal shift count
- if (imm8 == 1) {
- EMIT(0xD1);
- EMIT(0xC8 | dst.code());
- } else {
- EMIT(0xC1);
- EMIT(0xC8 | dst.code());
- EMIT(imm8);
- }
-}
-
-void Assembler::ror_cl(Register dst) {
- EnsureSpace ensure_space(this);
- EMIT(0xD3);
- EMIT(0xC8 | dst.code());
-}
-
void Assembler::sar(Register dst, uint8_t imm8) {
EnsureSpace ensure_space(this);
@@ -1479,7 +1501,7 @@ void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
EnsureSpace ensure_space(this);
- ASSERT(0 <= cc && static_cast<int>(cc) < 16);
+ ASSERT(0 <= cc && cc < 16);
if (L->is_bound()) {
const int short_size = 2;
const int long_size = 6;
@@ -1511,7 +1533,7 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
EnsureSpace ensure_space(this);
- ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
+ ASSERT((0 <= cc) && (cc < 16));
// 0000 1111 1000 tttn #32-bit disp.
EMIT(0x0F);
EMIT(0x80 | cc);
@@ -1966,16 +1988,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x58);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -1986,16 +1998,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0xF2);
- EMIT(0x0F);
- EMIT(0x59);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2103,15 +2105,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
-void Assembler::movmskps(Register dst, XMMRegister src) {
- ASSERT(CpuFeatures::IsEnabled(SSE2));
- EnsureSpace ensure_space(this);
- EMIT(0x0F);
- EMIT(0x50);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
@@ -2401,7 +2394,7 @@ void Assembler::psrlq(XMMRegister dst, XMMRegister src) {
}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle) {
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle) {
ASSERT(CpuFeatures::IsEnabled(SSE2));
EnsureSpace ensure_space(this);
EMIT(0x66);
diff --git a/deps/v8/src/ia32/assembler-ia32.h b/deps/v8/src/ia32/assembler-ia32.h
index ad805c1dc..b0f4651d1 100644
--- a/deps/v8/src/ia32/assembler-ia32.h
+++ b/deps/v8/src/ia32/assembler-ia32.h
@@ -582,7 +582,15 @@ class Assembler : public AssemblerBase {
// upon destruction of the assembler.
// TODO(vitalyr): the assembler does not need an isolate.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ ~Assembler();
+
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -809,8 +817,6 @@ class Assembler : public AssemblerBase {
void rcl(Register dst, uint8_t imm8);
void rcr(Register dst, uint8_t imm8);
- void ror(Register dst, uint8_t imm8);
- void ror_cl(Register dst);
void sar(Register dst, uint8_t imm8);
void sar_cl(Register dst);
@@ -990,10 +996,8 @@ class Assembler : public AssemblerBase {
void cvtsd2ss(XMMRegister dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void xorpd(XMMRegister dst, XMMRegister src);
void xorps(XMMRegister dst, XMMRegister src);
@@ -1015,7 +1019,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
void cmpltsd(XMMRegister dst, XMMRegister src);
void pcmpeqd(XMMRegister dst, XMMRegister src);
@@ -1051,7 +1054,7 @@ class Assembler : public AssemblerBase {
void psllq(XMMRegister dst, XMMRegister src);
void psrlq(XMMRegister reg, int8_t shift);
void psrlq(XMMRegister dst, XMMRegister src);
- void pshufd(XMMRegister dst, XMMRegister src, uint8_t shuffle);
+ void pshufd(XMMRegister dst, XMMRegister src, int8_t shuffle);
void pextrd(Register dst, XMMRegister src, int8_t offset) {
pextrd(Operand(dst), src, offset);
}
@@ -1094,6 +1097,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ int pc_offset() const { return pc_ - buffer_; }
+
// Check if there is less than kGap bytes available in the buffer.
// If this is the case, we need to grow the buffer before emitting
// an instruction or relocation information.
@@ -1112,11 +1117,15 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
+ static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+  bool predictable_code_size() const { return predictable_code_size_; }
+
void movsd(XMMRegister dst, const Operand& src);
void movsd(const Operand& dst, XMMRegister src);
@@ -1177,10 +1186,22 @@ class Assembler : public AssemblerBase {
friend class CodePatcher;
friend class EnsureSpace;
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
// code generation
+ byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
PositionsRecorder positions_recorder_;
+
+ bool emit_debug_code_;
+ bool predictable_code_size_;
+
friend class PositionsRecorder;
};
diff --git a/deps/v8/src/ia32/builtins-ia32.cc b/deps/v8/src/ia32/builtins-ia32.cc
index 01785bb53..9bc15e909 100644
--- a/deps/v8/src/ia32/builtins-ia32.cc
+++ b/deps/v8/src/ia32/builtins-ia32.cc
@@ -538,42 +538,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ sub(Operand(esp, 0), Immediate(5));
- __ pushad();
- __ mov(eax, Operand(esp, 8 * kPointerSize));
- {
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1, ebx);
- __ mov(Operand(esp, 0), eax);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ popad();
- __ ret(0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
diff --git a/deps/v8/src/ia32/code-stubs-ia32.cc b/deps/v8/src/ia32/code-stubs-ia32.cc
index a70ccbdd1..3b6987e6f 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.cc
+++ b/deps/v8/src/ia32/code-stubs-ia32.cc
@@ -735,12 +735,6 @@ class FloatingPointHelper : public AllStatic {
static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch);
-
- static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register scratch,
- XMMRegister xmm_scratch);
};
@@ -761,20 +755,11 @@ static void IntegerConvert(MacroAssembler* masm,
// Get exponent alone in scratch2.
__ mov(scratch2, scratch);
__ and_(scratch2, HeapNumber::kExponentMask);
- __ shr(scratch2, HeapNumber::kExponentShift);
- __ sub(scratch2, Immediate(HeapNumber::kExponentBias));
- // Load ecx with zero. We use this either for the final shift or
- // for the answer.
- __ xor_(ecx, ecx);
- // If the exponent is above 83, the number contains no significant
- // bits in the range 0..2^31, so the result is zero.
- static const uint32_t kResultIsZeroExponent = 83;
- __ cmp(scratch2, Immediate(kResultIsZeroExponent));
- __ j(above, &done);
if (use_sse3) {
CpuFeatures::Scope scope(SSE3);
// Check whether the exponent is too big for a 64 bit signed integer.
- static const uint32_t kTooBigExponent = 63;
+ static const uint32_t kTooBigExponent =
+ (HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift;
__ cmp(scratch2, Immediate(kTooBigExponent));
__ j(greater_equal, conversion_failure);
// Load x87 register with heap number.
@@ -786,11 +771,15 @@ static void IntegerConvert(MacroAssembler* masm,
__ mov(ecx, Operand(esp, 0)); // Load low word of answer into ecx.
__ add(esp, Immediate(sizeof(uint64_t))); // Nolint.
} else {
+ // Load ecx with zero. We use this either for the final shift or
+ // for the answer.
+ __ xor_(ecx, ecx);
// Check whether the exponent matches a 32 bit signed int that cannot be
// represented by a Smi. A non-smi 32 bit integer is 1.xxx * 2^30 so the
// exponent is 30 (biased). This is the exponent that we are fastest at and
// also the highest exponent we can handle here.
- const uint32_t non_smi_exponent = 30;
+ const uint32_t non_smi_exponent =
+ (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ cmp(scratch2, Immediate(non_smi_exponent));
// If we have a match of the int32-but-not-Smi exponent then skip some
// logic.
@@ -802,7 +791,8 @@ static void IntegerConvert(MacroAssembler* masm,
{
// Handle a big exponent. The only reason we have this code is that the
// >>> operator has a tendency to generate numbers with an exponent of 31.
- const uint32_t big_non_smi_exponent = 31;
+ const uint32_t big_non_smi_exponent =
+ (HeapNumber::kExponentBias + 31) << HeapNumber::kExponentShift;
__ cmp(scratch2, Immediate(big_non_smi_exponent));
__ j(not_equal, conversion_failure);
// We have the big exponent, typically from >>>. This means the number is
@@ -831,8 +821,19 @@ static void IntegerConvert(MacroAssembler* masm,
}
__ bind(&normal_exponent);
- // Exponent word in scratch, exponent in scratch2. Zero in ecx.
- // We know that 0 <= exponent < 30.
+ // Exponent word in scratch, exponent part of exponent word in scratch2.
+ // Zero in ecx.
+ // We know the exponent is smaller than 30 (biased). If it is less than
+  // 0 (biased), then the number is smaller in magnitude than 1.0 * 2^0, i.e.
+ // it rounds to zero.
+ const uint32_t zero_exponent =
+ (HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
+ __ sub(scratch2, Immediate(zero_exponent));
+ // ecx already has a Smi zero.
+ __ j(less, &done, Label::kNear);
+
+ // We have a shifted exponent between 0 and 30 in scratch2.
+ __ shr(scratch2, HeapNumber::kExponentShift);
__ mov(ecx, Immediate(30));
__ sub(ecx, scratch2);
@@ -867,8 +868,8 @@ static void IntegerConvert(MacroAssembler* masm,
__ jmp(&done, Label::kNear);
__ bind(&negative);
__ sub(ecx, scratch2);
+ __ bind(&done);
}
- __ bind(&done);
}
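The restored IntegerConvert compares raw exponent fields, so its constants are pre-biased and pre-shifted, e.g. `(HeapNumber::kExponentBias + 63) << HeapNumber::kExponentShift`. The cut-offs themselves follow from the double layout: with a 52-bit mantissa, any value whose unbiased exponent reaches 84 is a multiple of 2^32, so its low 32 bits, and hence the truncated int32, are zero. A standalone check of that boundary (plain C++, not V8 code):

#include <cmath>
#include <cstdio>

int main() {
  // At unbiased exponent 84 the smallest mantissa step is 2^(84-52) = 2^32,
  // so every representable value at that exponent is a multiple of 2^32.
  double d = std::ldexp(1.0 + std::ldexp(1.0, -52), 84);  // 2^84 + 2^32
  printf("%.1f\n", std::fmod(d, 4294967296.0));  // 0.0: ToInt32(d) == 0
  return 0;
}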
@@ -1191,17 +1192,16 @@ void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) {
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(SSE3);
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
__ push(edx);
__ push(eax);
// Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1210,7 +1210,7 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
@@ -1220,7 +1220,11 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ pop(ecx); // Save return address.
// Left and right arguments are already on top of the stack.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
__ push(Immediate(Smi::FromInt(MinorKey())));
+ __ push(Immediate(Smi::FromInt(op_)));
+ __ push(Immediate(Smi::FromInt(operands_type_)));
__ push(ecx); // Push return address.
@@ -1229,22 +1233,73 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
-static void BinaryOpStub_GenerateSmiCode(
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+}
+
+
+void BinaryOpStub::GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// 1. Move arguments into edx, eax except for DIV and MOD, which need the
// dividend in eax and edx free for the division. Use eax, ebx for those.
Comment load_comment(masm, "-- Load arguments");
Register left = edx;
Register right = eax;
- if (op == Token::DIV || op == Token::MOD) {
+ if (op_ == Token::DIV || op_ == Token::MOD) {
left = eax;
right = ebx;
__ mov(ebx, eax);
@@ -1257,7 +1312,7 @@ static void BinaryOpStub_GenerateSmiCode(
Label not_smis;
Register combined = ecx;
ASSERT(!left.is(combined) && !right.is(combined));
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
// Perform the operation into eax and smi check the result. Preserve
// eax in case the result is not a smi.
@@ -1301,7 +1356,7 @@ static void BinaryOpStub_GenerateSmiCode(
// eax and check the result if necessary.
Comment perform_smi(masm, "-- Perform smi operation");
Label use_fp_on_smis;
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
// Nothing to do.
break;
@@ -1435,7 +1490,7 @@ static void BinaryOpStub_GenerateSmiCode(
}
// 5. Emit return of result in eax. Some operations have registers pushed.
- switch (op) {
+ switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -1458,9 +1513,9 @@ static void BinaryOpStub_GenerateSmiCode(
// 6. For some operations emit inline code to perform floating point
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
- if (allow_heapnumber_results == BinaryOpStub::NO_HEAPNUMBER_RESULTS) {
+ if (allow_heapnumber_results == NO_HEAPNUMBER_RESULTS) {
__ bind(&use_fp_on_smis);
- switch (op) {
+ switch (op_) {
// Undo the effects of some operations, and some register moves.
case Token::SHL:
// The arguments are saved on the stack, and only used from there.
@@ -1488,8 +1543,8 @@ static void BinaryOpStub_GenerateSmiCode(
}
__ jmp(&not_smis);
} else {
- ASSERT(allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS);
- switch (op) {
+ ASSERT(allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS);
+ switch (op_) {
case Token::SHL:
case Token::SHR: {
Comment perform_float(masm, "-- Perform float operation on smis");
@@ -1500,13 +1555,13 @@ static void BinaryOpStub_GenerateSmiCode(
// Store the result in the HeapNumber and return.
// It's OK to overwrite the arguments on the stack because we
// are about to return.
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ mov(Operand(esp, 1 * kPointerSize), left);
__ mov(Operand(esp, 2 * kPointerSize), Immediate(0));
__ fild_d(Operand(esp, 1 * kPointerSize));
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
} else {
- ASSERT_EQ(Token::SHL, op);
+ ASSERT_EQ(Token::SHL, op_);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
__ cvtsi2sd(xmm0, left);
@@ -1528,7 +1583,7 @@ static void BinaryOpStub_GenerateSmiCode(
Comment perform_float(masm, "-- Perform float operation on smis");
__ bind(&use_fp_on_smis);
// Restore arguments to edx, eax.
- switch (op) {
+ switch (op_) {
case Token::ADD:
// Revert right = right + left.
__ sub(right, left);
@@ -1554,7 +1609,7 @@ static void BinaryOpStub_GenerateSmiCode(
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
FloatingPointHelper::LoadSSE2Smis(masm, ebx);
- switch (op) {
+ switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1564,7 +1619,7 @@ static void BinaryOpStub_GenerateSmiCode(
__ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
} else { // SSE2 not available, use FPU.
FloatingPointHelper::LoadFloatSmis(masm, ebx);
- switch (op) {
+ switch (op_) {
case Token::ADD: __ faddp(1); break;
case Token::SUB: __ fsubp(1); break;
case Token::MUL: __ fmulp(1); break;
@@ -1587,7 +1642,7 @@ static void BinaryOpStub_GenerateSmiCode(
// edx and eax.
Comment done_comment(masm, "-- Enter non-smi code");
__ bind(&not_smis);
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
case Token::SHL:
case Token::SAR:
@@ -1634,11 +1689,9 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NO_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, &call_runtime, NO_HEAPNUMBER_RESULTS);
} else {
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
}
__ bind(&call_runtime);
switch (op_) {
@@ -1663,9 +1716,19 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
}
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1693,11 +1756,6 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
}
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
// Input:
// edx: left operand (tagged)
// eax: right operand (tagged)
@@ -1705,7 +1763,7 @@ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
// eax: result (tagged)
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
// Floating point case.
switch (op_) {
@@ -1718,18 +1776,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_int32;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_int32);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_int32);
- }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
FloatingPointHelper::CheckSSE2OperandsAreInt32(masm, &not_int32, ecx);
if (op_ == Token::MOD) {
@@ -1745,10 +1791,14 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check result type if it is currently Int32.
if (result_type_ <= BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_int32, xmm0, ecx, xmm2);
+ __ cvttsd2si(ecx, Operand(xmm0));
+ __ cvtsi2sd(xmm2, ecx);
+ __ pcmpeqd(xmm2, xmm0);
+ __ movmskpd(ecx, xmm2);
+ __ test(ecx, Immediate(1));
+ __ j(zero, &not_int32);
}
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
}
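The open-coded sequence above (cvttsd2si, cvtsi2sd, pcmpeqd, movmskpd) replaces the removed CheckSSE2OperandIsInt32 helper: it truncates the double to int32, converts back, and bails out unless the round trip reproduces the original value. In scalar C++ the same idea looks roughly like the sketch below; note one difference: pcmpeqd compares bit patterns, so -0.0 fails the SSE2 check even though it compares == 0.0, and out-of-range inputs make cvttsd2si return the 0x80000000 indefinite value, which likewise fails the round trip (C++'s static_cast does not model that case).

#include <cstdint>
#include <cstdio>

// Rough scalar analogue of the inlined check; assumes d is in int32 range.
static bool IsInt32Double(double d) {
  int32_t i = static_cast<int32_t>(d);  // cvttsd2si: truncate toward zero
  return static_cast<double>(i) == d;   // cvtsi2sd, then compare
}

int main() {
  printf("%d %d %d\n",
         IsInt32Double(42.0),   // 1
         IsInt32Double(42.5),   // 0: truncation loses the fraction
         IsInt32Double(-7.0));  // 1
  return 0;
}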
@@ -1774,8 +1824,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -1800,14 +1849,10 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label not_floats;
Label not_int32;
Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
+ use_sse3_,
&not_floats);
- FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3,
+ FloatingPointHelper::CheckLoadedIntegersWereInt32(masm, use_sse3_,
&not_int32);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -1880,24 +1925,44 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
case Token::SUB:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
case Token::MUL:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
- return; // Handled above.
+ break;
case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
}
@@ -1946,28 +2011,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label not_floats;
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatures::Scope use_sse2(SSE2);
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- // In theory, we would need the same check in the non-SSE2 case,
- // but since we don't support Crankshaft on such hardware we can
- // afford not to care about precise type feedback.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(edx, &not_floats);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(eax, &not_floats);
- }
FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
- if (left_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm0, ecx, xmm2);
- }
- if (right_type_ == BinaryOpIC::INT32) {
- FloatingPointHelper::CheckSSE2OperandIsInt32(
- masm, &not_floats, xmm1, ecx, xmm2);
- }
switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
@@ -1976,7 +2020,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -1993,8 +2037,7 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2020,12 +2063,8 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
GenerateRegisterArgsPush(masm);
Label not_floats;
Label non_smi_result;
- // We do not check the input arguments here, as any value is
- // unconditionally truncated to an int32 anyway. To get the
- // right optimized code, int32 type feedback is just right.
- bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
+ use_sse3_,
&not_floats);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2097,23 +2136,46 @@ void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
switch (op_) {
case Token::ADD:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
case Token::SUB:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
case Token::MUL:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
case Token::DIV:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
case Token::MOD:
GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
break;
case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
}
@@ -2142,8 +2204,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
UNREACHABLE();
}
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
// Floating point case.
switch (op_) {
@@ -2163,7 +2224,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- BinaryOpStub_GenerateHeapResultAllocation(masm, &call_runtime, mode_);
+ GenerateHeapResultAllocation(masm, &call_runtime);
__ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
} else { // SSE2 not available, use FPU.
@@ -2180,8 +2241,7 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
default: UNREACHABLE();
}
Label after_alloc_failure;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, &after_alloc_failure, mode_);
+ GenerateHeapResultAllocation(masm, &after_alloc_failure);
__ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
__ ret(0);
__ bind(&after_alloc_failure);
@@ -2202,9 +2262,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
case Token::SHL:
case Token::SHR: {
Label non_smi_result;
- bool use_sse3 = platform_specific_bit_;
FloatingPointHelper::LoadUnknownsAsIntegers(masm,
- use_sse3,
+ use_sse3_,
&call_runtime);
switch (op_) {
case Token::BIT_OR: __ or_(eax, ecx); break;
@@ -2271,26 +2330,48 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
// result.
__ bind(&call_runtime);
switch (op_) {
- case Token::ADD:
+ case Token::ADD: {
GenerateAddStrings(masm);
- // Fall through.
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ }
case Token::SUB:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
case Token::MUL:
+ GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
case Token::DIV:
GenerateRegisterArgsPush(masm);
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
break;
case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
break;
default:
UNREACHABLE();
}
- GenerateCallRuntime(masm);
}
@@ -2326,10 +2407,11 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Label* alloc_failure) {
Label skip_allocation;
+ OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in edx is already an object, we skip the
@@ -2841,24 +2923,16 @@ void FloatingPointHelper::LoadSSE2Smis(MacroAssembler* masm,
void FloatingPointHelper::CheckSSE2OperandsAreInt32(MacroAssembler* masm,
Label* non_int32,
Register scratch) {
- CheckSSE2OperandIsInt32(masm, non_int32, xmm0, scratch, xmm2);
- CheckSSE2OperandIsInt32(masm, non_int32, xmm1, scratch, xmm2);
-}
-
-
-void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
- Label* non_int32,
- XMMRegister operand,
- Register scratch,
- XMMRegister xmm_scratch) {
- __ cvttsd2si(scratch, Operand(operand));
- __ cvtsi2sd(xmm_scratch, scratch);
- __ pcmpeqd(xmm_scratch, operand);
- __ movmskps(scratch, xmm_scratch);
- // Two least significant bits should be both set.
- __ not_(scratch);
- __ test(scratch, Immediate(3));
+ __ cvttsd2si(scratch, Operand(xmm0));
+ __ cvtsi2sd(xmm2, scratch);
+ __ ucomisd(xmm0, xmm2);
+ __ j(not_zero, non_int32);
+ __ j(carry, non_int32);
+ __ cvttsd2si(scratch, Operand(xmm1));
+ __ cvtsi2sd(xmm2, scratch);
+ __ ucomisd(xmm1, xmm2);
__ j(not_zero, non_int32);
+ __ j(carry, non_int32);
}
@@ -3113,10 +3187,10 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
- __ fstp(1); // 2^X
+ __ fstp(1); // 2^X
// Bail out to runtime in case of exceptions in the status word.
__ fnstsw_ax();
__ test_b(eax, 0x5F); // We check for all but precision exception.
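
For reference, the F2XM1/FSCALE pair above computes 2^X by splitting X into a rounded part and a fractional part in (-1, 1), since F2XM1 is only defined on that interval; FSCALE then scales by the integral power of two. A portable sketch of the same decomposition (not the stub itself, and ignoring the FPU's exact rounding mode):

    // 2^x via the F2XM1/FSCALE split, in portable C++.
    #include <cmath>
    #include <cstdio>

    static double Pow2(double x) {
      double r = std::nearbyint(x);        // rnd(X), like FRNDINT
      double frac = x - r;                 // in (-1, 1), safe for F2XM1
      double f = std::exp2(frac) - 1.0;    // F2XM1: 2^frac - 1
      return std::ldexp(f + 1.0, (int)r);  // FSCALE: (f + 1) * 2^rnd(X)
    }

    int main() {
      std::printf("%g %g\n", Pow2(10.0), Pow2(-3.5));  // 1024, ~0.0883883
    }
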
@@ -3519,7 +3593,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ pop(eax); // Remove saved parameter count.
__ mov(Operand(esp, 1 * kPointerSize), ecx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -3886,9 +3960,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ test(ecx, ecx);
__ j(zero, &setup_two_byte, Label::kNear);
__ SmiUntag(esi);
- __ lea(ecx, FieldOperand(eax, esi, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, esi, times_1, SeqAsciiString::kHeaderSize));
__ mov(Operand(esp, 3 * kPointerSize), ecx); // Argument 4.
- __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(ecx, FieldOperand(eax, ebx, times_1, SeqAsciiString::kHeaderSize));
__ mov(Operand(esp, 2 * kPointerSize), ecx); // Argument 3.
__ jmp(&setup_rest, Label::kNear);
@@ -4034,7 +4108,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ sub(eax, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
__ test_b(ebx, kStringEncodingMask);
@@ -4247,59 +4321,30 @@ static int NegativeComparisonResult(Condition cc) {
return (cc == greater || cc == greater_equal) ? LESS : GREATER;
}
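
NegativeComparisonResult supplies the answer that makes an ordered comparison fail, which is what both the undefined and the NaN paths below need. A sketch of the logic with illustrative enum values:

    // How a reversed result makes `x < NaN`-style comparisons come out false.
    #include <cstdio>

    enum Result { LESS = -1, EQUAL = 0, GREATER = 1 };
    enum Condition { kLess, kLessEqual, kGreater, kGreaterEqual };

    static Result NegativeComparisonResult(Condition cc) {
      // For cc == less/less_equal return GREATER, otherwise LESS, so the
      // relation being tested fails either way.
      return (cc == kGreater || cc == kGreaterEqual) ? LESS : GREATER;
    }

    int main() {
      // "a < b" compiles to COMPARE with cc == kLess; a NaN operand yields
      // GREATER (1), and GREATER < 0 is false -- matching JS semantics.
      std::printf("%d\n", NegativeComparisonResult(kLess));  // 1
    }
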
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::HEAP_NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ cmp(FieldOperand(input, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(not_equal, fail);
- }
- // We could be strict about symbol/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-static void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
- __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
- __ cmp(scratch, kSymbolTag | kStringTag);
- __ j(not_equal, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects;
- Condition cc = GetCondition();
- Label miss;
- CheckInputType(masm, edx, left_, &miss);
- CheckInputType(masm, eax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ mov(ecx, edx);
- __ or_(ecx, eax);
- __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
- __ sub(edx, eax); // Return on the result of the subtraction.
- __ j(no_overflow, &smi_done, Label::kNear);
- __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
- __ bind(&smi_done);
- __ mov(eax, edx);
- __ ret(0);
- __ bind(&non_smi);
+ // Compare two smis if required.
+ if (include_smi_compare_) {
+ Label non_smi, smi_done;
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ JumpIfNotSmi(ecx, &non_smi, Label::kNear);
+ __ sub(edx, eax); // Return on the result of the subtraction.
+ __ j(no_overflow, &smi_done, Label::kNear);
+ __ not_(edx); // Correct sign in case of overflow. edx is never 0 here.
+ __ bind(&smi_done);
+ __ mov(eax, edx);
+ __ ret(0);
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ __ mov(ecx, edx);
+ __ or_(ecx, eax);
+ __ test(ecx, Immediate(kSmiTagMask));
+ __ Assert(not_zero, "Unexpected smi operands.");
+ }
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
@@ -4311,61 +4356,67 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ cmp(eax, edx);
__ j(not_equal, &not_identical);
- if (cc != equal) {
+ if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ cmp(edx, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to factory->nan_value(),
// so we do the second best thing - test it ourselves.
- Label heap_number;
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- Immediate(masm->isolate()->factory()->heap_number_map()));
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical JSObjects. Otherwise return equal.
- __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
- __ j(above_equal, &not_identical);
- }
- __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
- __ ret(0);
-
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if
- // it's not NaN.
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // We only accept QNaNs, which have bit 51 set.
- // Read top bits of double representation (second word of value).
-
- // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
- // all bits in the mask are set. We only need to check the word
- // that contains the exponent and high bit of the mantissa.
- STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
- __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
- __ Set(eax, Immediate(0));
- // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
- // bits.
- __ add(edx, edx);
- __ cmp(edx, kQuietNaNHighBitsMask << 1);
- if (cc == equal) {
- STATIC_ASSERT(EQUAL != 1);
- __ setcc(above_equal, eax);
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
} else {
- Label nan;
- __ j(above_equal, &nan, Label::kNear);
+ Label heap_number;
+ __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
+ Immediate(masm->isolate()->factory()->heap_number_map()));
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc_ != equal) {
+ // Call runtime on identical JSObjects. Otherwise return equal.
+ __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
+ __ j(above_equal, &not_identical);
+ }
__ Set(eax, Immediate(Smi::FromInt(EQUAL)));
__ ret(0);
- __ bind(&nan);
- __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
- __ ret(0);
+
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if
+ // it's not NaN.
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // We only accept QNaNs, which have bit 51 set.
+ // Read top bits of double representation (second word of value).
+
+ // Value is a QNaN if value & kQuietNaNMask == kQuietNaNMask, i.e.,
+ // all bits in the mask are set. We only need to check the word
+ // that contains the exponent and high bit of the mantissa.
+ STATIC_ASSERT(((kQuietNaNHighBitsMask << 1) & 0x80000000u) != 0);
+ __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
+ __ Set(eax, Immediate(0));
+ // Shift value and mask so kQuietNaNHighBitsMask applies to topmost
+ // bits.
+ __ add(edx, edx);
+ __ cmp(edx, kQuietNaNHighBitsMask << 1);
+ if (cc_ == equal) {
+ STATIC_ASSERT(EQUAL != 1);
+ __ setcc(above_equal, eax);
+ __ ret(0);
+ } else {
+ Label nan;
+ __ j(above_equal, &nan, Label::kNear);
+ __ Set(eax, Immediate(Smi::FromInt(EQUAL)));
+ __ ret(0);
+ __ bind(&nan);
+ __ Set(eax, Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
+ __ ret(0);
+ }
}
__ bind(&not_identical);
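
The heap-number path above detects NaN from the raw bit pattern: all exponent bits (52..62) set and, for the quiet NaNs the stub accepts, mantissa bit 51 set as well. A compact sketch of that bit test, simplified to a single mask on the high word (the literal mask here mirrors what kQuietNaNHighBitsMask encodes):

    // Quiet-NaN test on the raw bits of a double.
    #include <cstdint>
    #include <cstring>
    #include <cmath>
    #include <cstdio>

    static bool IsQuietNaN(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      // High word holds the exponent and the top of the mantissa.
      uint32_t hi = (uint32_t)(bits >> 32);
      // Exponent all ones (0x7FF00000) plus quiet bit 51 (0x00080000).
      return (hi & 0x7FF80000u) == 0x7FF80000u;
    }

    int main() {
      std::printf("%d %d\n", IsQuietNaN(std::nan("")), IsQuietNaN(1.0));  // 1 0
    }
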
@@ -4373,7 +4424,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Strict equality can quickly decide whether objects are equal.
// Non-strict object equality is slower, so it is handled later in the stub.
- if (cc == equal && strict()) {
+ if (cc_ == equal && strict_) {
Label slow; // Fallthrough label.
Label not_smis;
// If we're doing a strict equality comparison, we don't have to do
@@ -4444,68 +4495,70 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
// Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- if (CpuFeatures::IsSupported(SSE2)) {
- CpuFeatures::Scope use_sse2(SSE2);
- CpuFeatures::Scope use_cmov(CMOV);
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ if (CpuFeatures::IsSupported(SSE2)) {
+ CpuFeatures::Scope use_sse2(SSE2);
+ CpuFeatures::Scope use_cmov(CMOV);
- FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
- __ ucomisd(xmm0, xmm1);
+ FloatingPointHelper::LoadSSE2Operands(masm, &non_number_comparison);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ mov(eax, 0); // equal
- __ mov(ecx, Immediate(Smi::FromInt(1)));
- __ cmov(above, eax, ecx);
- __ mov(ecx, Immediate(Smi::FromInt(-1)));
- __ cmov(below, eax, ecx);
- __ ret(0);
- } else {
- FloatingPointHelper::CheckFloatOperands(
- masm, &non_number_comparison, ebx);
- FloatingPointHelper::LoadFloatOperand(masm, eax);
- FloatingPointHelper::LoadFloatOperand(masm, edx);
- __ FCmp();
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ mov(eax, 0); // equal
+ __ mov(ecx, Immediate(Smi::FromInt(1)));
+ __ cmov(above, eax, ecx);
+ __ mov(ecx, Immediate(Smi::FromInt(-1)));
+ __ cmov(below, eax, ecx);
+ __ ret(0);
+ } else {
+ FloatingPointHelper::CheckFloatOperands(
+ masm, &non_number_comparison, ebx);
+ FloatingPointHelper::LoadFloatOperand(masm, eax);
+ FloatingPointHelper::LoadFloatOperand(masm, edx);
+ __ FCmp();
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
- Label below_label, above_label;
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ j(below, &below_label, Label::kNear);
- __ j(above, &above_label, Label::kNear);
+ Label below_label, above_label;
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ j(below, &below_label, Label::kNear);
+ __ j(above, &above_label, Label::kNear);
- __ Set(eax, Immediate(0));
- __ ret(0);
+ __ Set(eax, Immediate(0));
+ __ ret(0);
- __ bind(&below_label);
- __ mov(eax, Immediate(Smi::FromInt(-1)));
- __ ret(0);
+ __ bind(&below_label);
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ __ ret(0);
- __ bind(&above_label);
- __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ bind(&above_label);
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ __ ret(0);
+ }
+
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ mov(eax, Immediate(Smi::FromInt(1)));
+ } else {
+ __ mov(eax, Immediate(Smi::FromInt(-1)));
+ }
__ ret(0);
- }
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ mov(eax, Immediate(Smi::FromInt(1)));
- } else {
- __ mov(eax, Immediate(Smi::FromInt(-1)));
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
}
- __ ret(0);
-
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc == equal) {
+ if (cc_ == equal) {
BranchIfNonSymbol(masm, &check_for_strings, eax, ecx);
BranchIfNonSymbol(masm, &check_for_strings, edx, ecx);
@@ -4521,7 +4574,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
&check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc == equal) {
+ if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
edx,
eax,
@@ -4540,7 +4593,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
#endif
__ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
+ if (cc_ == equal && !strict_) {
// Non-strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -4584,11 +4637,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc))));
+ __ push(Immediate(Smi::FromInt(NegativeComparisonResult(cc_))));
}
// Restore return address on the stack.
@@ -4597,9 +4650,19 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
- __ bind(&miss);
- GenerateMiss(masm);
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
+ __ and_(scratch, kIsSymbolMask | kIsNotStringMask);
+ __ cmp(scratch, kSymbolTag | kStringTag);
+ __ j(not_equal, label);
}
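
BranchIfNonSymbol folds two instance-type checks into one AND plus one CMP: the object must be a string (kIsNotStringMask clear) and a symbol (kIsSymbolMask set). A sketch of the mask-and-compare, with illustrative tag values rather than v8's actual layout:

    // One AND + one CMP in place of two separate branches.
    #include <cstdio>

    static const unsigned kIsNotStringMask = 0x80;  // 0 => string
    static const unsigned kStringTag       = 0x00;
    static const unsigned kIsSymbolMask    = 0x40;  // set => symbol
    static const unsigned kSymbolTag       = 0x40;

    static bool IsSymbol(unsigned instance_type) {
      return (instance_type & (kIsSymbolMask | kIsNotStringMask)) ==
             (kSymbolTag | kStringTag);
    }

    int main() {
      std::printf("%d %d\n", IsSymbol(0x40), IsSymbol(0x81));  // 1 0
    }
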
@@ -5344,6 +5407,44 @@ Register InstanceofStub::left() { return eax; }
Register InstanceofStub::right() { return edx; }
+int CompareStub::MinorKey() {
+ // Encode the three parameters in a unique 16-bit value. To avoid duplicate
+ // stubs, the never-NaN-NaN condition is only taken into account when the
+ // condition is equality.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
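
MinorKey relies on v8's BitField template to pack each parameter into a disjoint bit range of one integer. A self-contained sketch of that encode/decode pattern (the field widths and positions below are illustrative, not the stub's exact layout):

    // Minimal BitField: pack typed fields into disjoint bit ranges.
    #include <cstdio>

    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T value) { return (unsigned)value << shift; }
      static T decode(unsigned value) { return (T)((value & kMask) >> shift); }
    };

    using StrictField = BitField<bool, 0, 1>;
    using ConditionField = BitField<unsigned, 1, 12>;  // cf. cc_ < (1 << 12)

    int main() {
      unsigned key = StrictField::encode(true) | ConditionField::encode(5);
      std::printf("%d %u\n", StrictField::decode(key),
                  ConditionField::decode(key));  // 1 5
    }
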
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+void CompareStub::PrintName(StringStream* stream) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+ bool is_equality = cc_ == equal || cc_ == not_equal;
+ stream->Add("CompareStub_%s", cc_name);
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
+}
+
+
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -5578,8 +5679,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ JumpIfNotBothSequentialAsciiStrings(eax, edx, ebx, ecx, &call_runtime);
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -5596,8 +5697,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
// Get the two characters forming the new string.
- __ movzx_b(ebx, FieldOperand(eax, SeqOneByteString::kHeaderSize));
- __ movzx_b(ecx, FieldOperand(edx, SeqOneByteString::kHeaderSize));
+ __ movzx_b(ebx, FieldOperand(eax, SeqAsciiString::kHeaderSize));
+ __ movzx_b(ecx, FieldOperand(edx, SeqAsciiString::kHeaderSize));
__ bind(&make_two_character_string_no_reload);
__ IncrementCounter(counters->string_add_make_two_char(), 1);
__ AllocateAsciiString(eax, 2, edi, edx, &call_runtime);
@@ -5605,7 +5706,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ shl(ecx, kBitsPerByte);
__ or_(ebx, ecx);
// Set the characters in the new string.
- __ mov_w(FieldOperand(eax, SeqOneByteString::kHeaderSize), ebx);
+ __ mov_w(FieldOperand(eax, SeqAsciiString::kHeaderSize), ebx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
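
The two-character fast path above builds the new string's payload in a register: the second character is shifted up by kBitsPerByte and OR-ed onto the first, then written with a single 16-bit store. The same packing in plain C++ (little-endian, as on ia32):

    // Pack two one-byte characters for a single 16-bit store.
    #include <cstdint>
    #include <cstring>
    #include <cstdio>

    static uint16_t PackTwoChars(uint8_t first, uint8_t second) {
      return (uint16_t)(first | ((uint16_t)second << 8));  // shl; or_
    }

    int main() {
      char buf[3] = {0};
      uint16_t packed = PackTwoChars('h', 'i');
      std::memcpy(buf, &packed, 2);   // the mov_w equivalent
      std::printf("%s\n", buf);       // "hi" on little-endian
    }
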
@@ -5622,7 +5723,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
__ movzx_b(edi, FieldOperand(edi, Map::kInstanceTypeOffset));
__ and_(ecx, edi);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ecx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -5647,6 +5748,13 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// edi: second instance type.
__ test(ecx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
+ __ mov(ecx, FieldOperand(eax, HeapObject::kMapOffset));
+ __ movzx_b(ecx, FieldOperand(ecx, Map::kInstanceTypeOffset));
+ __ xor_(edi, ecx);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ and_(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ cmp(edi, kAsciiStringTag | kAsciiDataHintTag);
+ __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(ecx, edi, no_reg, &call_runtime);
__ jmp(&allocated);
@@ -5672,10 +5780,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(ecx, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(eax, FieldOperand(eax, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- __ add(eax, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(eax, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ bind(&first_prepared);
__ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
@@ -5693,10 +5801,10 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ test_b(edi, kShortExternalStringMask);
__ j(not_zero, &call_runtime);
__ mov(edx, FieldOperand(edx, ExternalString::kResourceDataOffset));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- __ add(edx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(edx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ bind(&second_prepared);
// Push the addresses of both strings' first characters onto the stack.
@@ -5717,7 +5825,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// eax: result string
__ mov(ecx, eax);
// Locate first character of result.
- __ add(ecx, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(ecx, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load first argument's length and first character location. Account for
// values currently on the stack when fetching arguments from it.
__ mov(edx, Operand(esp, 4 * kPointerSize));
@@ -6027,7 +6135,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe_pop_mask[i]);
// Check if the two characters match.
- __ mov(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ mov(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
__ and_(temp, 0x0000ffff);
__ cmp(chars, temp);
__ j(equal, &found_in_symbol_table);
@@ -6216,7 +6324,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(ebx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -6255,7 +6363,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ mov(edi, FieldOperand(edi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ sub(edi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
@@ -6263,7 +6371,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ push(edx);
__ push(edi);
__ SmiUntag(ecx);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ test_b(ebx, kStringEncodingMask);
__ j(zero, &two_byte_sequential);
@@ -6275,12 +6383,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ mov(edx, esi); // esi used by following code.
// Locate first character of result.
__ mov(edi, eax);
- __ add(edi, Immediate(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ add(edi, Immediate(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Load string argument and locate character of sub string start.
__ pop(esi);
__ pop(ebx);
__ SmiUntag(ebx);
- __ lea(esi, FieldOperand(esi, ebx, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(esi, FieldOperand(esi, ebx, times_1, SeqAsciiString::kHeaderSize));
// eax: result string
// ecx: result length
@@ -6441,9 +6549,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -6498,7 +6606,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ mov(ecx, edx);
__ or_(ecx, eax);
@@ -6524,52 +6632,31 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBER);
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
+ __ mov(ecx, edx);
+ __ and_(ecx, eax);
+ __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(edx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(eax, &miss);
- }
+ __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
+ __ j(not_equal, &maybe_undefined1, Label::kNear);
+ __ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
+ __ j(not_equal, &maybe_undefined2, Label::kNear);
// Inlining the double comparison and falling back to the general compare
- // stub if NaN is involved or SSE2 or CMOV is unsupported.
+ // stub if NaN is involved or SSE2 or CMOV is unsupported.
if (CpuFeatures::IsSupported(SSE2) && CpuFeatures::IsSupported(CMOV)) {
CpuFeatures::Scope scope1(SSE2);
CpuFeatures::Scope scope2(CMOV);
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(eax, &right_smi, Label::kNear);
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ mov(ecx, eax); // Can't clobber eax because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm1, ecx);
-
- __ bind(&left);
- __ JumpIfSmi(edx, &left_smi, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
+ // Load left and right operands.
__ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ mov(ecx, edx); // Can't clobber edx because we can still jump away.
- __ SmiUntag(ecx);
- __ cvtsi2sd(xmm0, ecx);
+ __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
- __ bind(&done);
- // Compare operands.
+ // Compare operands
__ ucomisd(xmm0, xmm1);
// Don't base result on EFLAGS when a NaN is involved.
@@ -6583,30 +6670,17 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ mov(ecx, Immediate(Smi::FromInt(-1)));
__ cmov(below, eax, ecx);
__ ret(0);
- } else {
- __ mov(ecx, edx);
- __ and_(ecx, eax);
- __ JumpIfSmi(ecx, &generic_stub, Label::kNear);
-
- __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined1, Label::kNear);
- __ cmp(FieldOperand(edx, HeapObject::kMapOffset),
- masm->isolate()->factory()->heap_number_map());
- __ j(not_equal, &maybe_undefined2, Label::kNear);
}
__ bind(&unordered);
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ cmp(eax, Immediate(masm->isolate()->factory()->undefined_value()));
__ j(not_equal, &miss);
- __ JumpIfSmi(edx, &unordered);
__ CmpObjectType(edx, HEAP_NUMBER_TYPE, ecx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -6624,7 +6698,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOL);
+ ASSERT(state_ == CompareIC::SYMBOLS);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -6669,7 +6743,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -6758,7 +6832,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ mov(ecx, edx);
__ and_(ecx, eax);
@@ -7223,7 +7297,13 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
int argument_count = 3;
__ PrepareCallCFunction(argument_count, regs_.scratch0());
__ mov(Operand(esp, 0 * kPointerSize), regs_.object());
- __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.address()); // Slot.
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
+ __ mov(Operand(esp, 1 * kPointerSize), regs_.scratch0()); // Value.
+ }
__ mov(Operand(esp, 2 * kPointerSize),
Immediate(ExternalReference::isolate_address()));
diff --git a/deps/v8/src/ia32/code-stubs-ia32.h b/deps/v8/src/ia32/code-stubs-ia32.h
index 29c16e130..803a711de 100644
--- a/deps/v8/src/ia32/code-stubs-ia32.h
+++ b/deps/v8/src/ia32/code-stubs-ia32.h
@@ -154,6 +154,96 @@ class UnaryOpStub: public CodeStub {
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ use_sse3_ = CpuFeatures::IsSupported(SSE3);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_sse3_(SSE3Bits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_sse3_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 16 bits RRRTTTSOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class SSE3Bits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | SSE3Bits::encode(use_sse3_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* slow,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
diff --git a/deps/v8/src/ia32/codegen-ia32.cc b/deps/v8/src/ia32/codegen-ia32.cc
index 9477bf149..eb6868729 100644
--- a/deps/v8/src/ia32/codegen-ia32.cc
+++ b/deps/v8/src/ia32/codegen-ia32.cc
@@ -102,43 +102,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(SSE2)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // esp[1 * kPointerSize]: raw double input
- // esp[0 * kPointerSize]: return address
- {
- CpuFeatures::Scope use_sse2(SSE2);
- XMMRegister input = xmm1;
- XMMRegister result = xmm2;
- __ movdbl(input, Operand(esp, 1 * kPointerSize));
- __ push(eax);
- __ push(ebx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm0, eax, ebx);
-
- __ pop(ebx);
- __ pop(eax);
- __ movdbl(Operand(esp, 1 * kPointerSize), result);
- __ fld_d(Operand(esp, 1 * kPointerSize));
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
@@ -769,7 +732,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ test(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -788,174 +751,12 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzx_b(result, FieldOperand(string,
index,
times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ test(index, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi index");
- __ test(value, Immediate(kSmiTagMask));
- __ Check(zero, "Non-smi value");
-
- __ cmp(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ cmp(index, Immediate(Smi::FromInt(0)));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ mov(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzx_b(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ and_(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmp(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ mov_b(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- // No need to untag a smi for two-byte addressing.
- __ mov_w(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- }
-}
-
-
-static Operand ExpConstant(int index) {
- return Operand::StaticVariable(ExternalReference::math_exp_constants(index));
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(double_scratch));
- ASSERT(!input.is(result));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movdbl(double_scratch, ExpConstant(0));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, ExpConstant(1));
- __ movdbl(result, ExpConstant(2));
- __ j(above_equal, &done);
- __ movdbl(double_scratch, ExpConstant(3));
- __ movdbl(result, ExpConstant(4));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movd(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movdbl(result, ExpConstant(6));
- __ mulsd(double_scratch, ExpConstant(5));
- __ subsd(double_scratch, input);
- __ subsd(result, double_scratch);
- __ movsd(input, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ mov(temp1, temp2);
- __ mulsd(result, ExpConstant(7));
- __ subsd(result, double_scratch);
- __ add(temp1, Immediate(0x1ff800));
- __ addsd(result, ExpConstant(8));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, 11);
- __ shl(temp1, 20);
- __ movd(input, temp1);
- __ pshufd(input, input, static_cast<uint8_t>(0xe1)); // Order: 11 10 00 01
- __ movdbl(double_scratch, Operand::StaticArray(
- temp2, times_8, ExternalReference::math_exp_log_table()));
- __ por(input, double_scratch);
- __ mulsd(result, input);
+ SeqAsciiString::kHeaderSize));
__ bind(&done);
}
#undef __
-static const int kNoCodeAgeSequenceLength = 5;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(ebp);
- patcher.masm()->mov(ebp, esp);
- patcher.masm()->push(esi);
- patcher.masm()->push(edi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start(), RelocInfo::NONE);
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_IA32
diff --git a/deps/v8/src/ia32/codegen-ia32.h b/deps/v8/src/ia32/codegen-ia32.h
index 513727414..f4ab0b50f 100644
--- a/deps/v8/src/ia32/codegen-ia32.h
+++ b/deps/v8/src/ia32/codegen-ia32.h
@@ -88,20 +88,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
diff --git a/deps/v8/src/ia32/deoptimizer-ia32.cc b/deps/v8/src/ia32/deoptimizer-ia32.cc
index c9ecaccd2..99ad5225b 100644
--- a/deps/v8/src/ia32/deoptimizer-ia32.cc
+++ b/deps/v8/src/ia32/deoptimizer-ia32.cc
@@ -210,6 +210,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -222,26 +224,31 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
+ // The stack check code matches the pattern:
//
- // sub <profiling_counter>, <delta>
- // jns ok
+ // cmp esp, <limit>
+ // jae ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // sub <profiling_counter>, <delta> ;; Not changed
+ // cmp esp, <limit> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ } else {
+ ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
+ }
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -265,8 +272,13 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
+ if (FLAG_count_based_interrupts) {
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ } else {
+ *(call_target_address - 3) = kJaeInstruction;
+ *(call_target_address - 2) = kJaeOffset;
+ }
Assembler::set_target_address_at(call_target_address,
check_code->entry());
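
Both patch routines above overwrite only the two branch bytes in front of the call and leave the call's 4-byte target field to Assembler::set_target_address_at. A reduced sketch of the byte-level dance, using the jae encoding from this file (the toy buffer layout is illustrative):

    // Swap the guard branch for a two-byte nop and back again.
    #include <cstdint>
    #include <cstdio>

    static const uint8_t kJaeInstruction = 0x73, kJaeOffset = 0x07;
    static const uint8_t kNopByteOne = 0x66, kNopByteTwo = 0x90;

    static void PatchBranchToNop(uint8_t* call_target_address) {
      // The branch sits 3 and 2 bytes before the call's target-address field.
      call_target_address[-3] = kNopByteOne;
      call_target_address[-2] = kNopByteTwo;
    }

    static void RevertNopToBranch(uint8_t* call_target_address) {
      call_target_address[-3] = kJaeInstruction;
      call_target_address[-2] = kJaeOffset;
    }

    int main() {
      uint8_t code[8] = {0x73, 0x07, 0xe8, 0, 0, 0, 0, 0};  // jae +7; call ...
      PatchBranchToNop(code + 3);
      std::printf("%02x %02x\n", code[0], code[1]);  // 66 90
      RevertNopToBranch(code + 3);
      std::printf("%02x %02x\n", code[0], code[1]);  // 73 07
    }
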
diff --git a/deps/v8/src/ia32/disasm-ia32.cc b/deps/v8/src/ia32/disasm-ia32.cc
index 9eb0d292c..75b46bd47 100644
--- a/deps/v8/src/ia32/disasm-ia32.cc
+++ b/deps/v8/src/ia32/disasm-ia32.cc
@@ -869,7 +869,6 @@ static const char* F0Mnem(byte f0byte) {
case 0xAF: return "imul";
case 0xA5: return "shld";
case 0xAD: return "shrd";
- case 0xAC: return "shrd"; // 3-operand version.
case 0xAB: return "bts";
default: return NULL;
}
@@ -1040,14 +1039,6 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
NameOfXMMRegister(regop),
NameOfXMMRegister(rm));
data++;
- } else if (f0byte == 0x50) {
- data += 2;
- int mod, regop, rm;
- get_modrm(*data, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s,%s",
- NameOfCPURegister(regop),
- NameOfXMMRegister(rm));
- data++;
} else if ((f0byte & 0xF0) == 0x80) {
data += JumpConditional(data, branch_hint);
} else if (f0byte == 0xBE || f0byte == 0xBF || f0byte == 0xB6 ||
diff --git a/deps/v8/src/ia32/full-codegen-ia32.cc b/deps/v8/src/ia32/full-codegen-ia32.cc
index 50713b5c1..406537d2d 100644
--- a/deps/v8/src/ia32/full-codegen-ia32.cc
+++ b/deps/v8/src/ia32/full-codegen-ia32.cc
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -156,7 +156,6 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- info->set_prologue_offset(masm_->pc_offset());
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -329,27 +328,39 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Stack check");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ // Count based interrupts happen often enough when they are enabled
+ // that the additional stack checks are not necessary (they would
+ // only check for interrupts).
+ ExternalReference stack_limit =
+ ExternalReference::address_of_stack_limit(isolate());
+ __ cmp(esp, Operand::StaticVariable(stack_limit));
+ __ j(above_equal, &ok, Label::kNear);
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ RecordStackCheck(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -358,7 +369,9 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- EmitProfilingCounterReset();
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
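
With count-based interrupts, the decrement applied at each back edge is weighted by the loop body's code size and clamped, so large hot loops exhaust the profiling counter (and trigger the InterruptStub) sooner. A sketch of that computation, with illustrative constant values:

    // Weighted back-edge decrement, clamped to [1, kMaxBackEdgeWeight].
    #include <algorithm>
    #include <cstdio>

    static const int kMaxBackEdgeWeight = 127;
    static const int kBackEdgeDistanceUnit = 100;  // code bytes per weight unit

    static int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
    }

    int main() {
      std::printf("%d %d %d\n",
                  BackEdgeWeight(10),        // 1
                  BackEdgeWeight(2500),      // 25
                  BackEdgeWeight(1000000));  // 127 (clamped)
    }
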
@@ -741,7 +754,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
+ // The variable in the declaration always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
@@ -870,32 +884,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());
- // Load instance object.
- __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
- __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }
- // Assign it.
- __ mov(ContextOperand(esi, variable->index()), eax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(esi,
- Context::SlotOffset(variable->index()),
- eax,
- ecx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ mov(ContextOperand(esi, variable->index()), Immediate(instance));
+ Visit(declaration->module());
+ break;
+ }
- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}
@@ -930,21 +945,13 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
__ push(esi); // The context is the first argument.
- __ Push(pairs);
- __ Push(Smi::FromInt(DeclareGlobalsFlags()));
+ __ push(Immediate(pairs));
+ __ push(Immediate(Smi::FromInt(DeclareGlobalsFlags())));
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
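The three runtime arguments pushed above, in push order (the exact contents of `pairs` are an assumption inferred from the surrounding code):

    //   esi                                  -> the current context
    //   pairs                                -> FixedArray of global declarations
    //   Smi::FromInt(DeclareGlobalsFlags())  -> declaration flags, smi-tagged
    // CallRuntime(Runtime::kDeclareGlobals, 3) consumes all three.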
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1186,7 +1193,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ add(Operand(esp, 0 * kPointerSize), Immediate(Smi::FromInt(1)));
- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1339,9 +1346,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ mov(eax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ cmp(eax, isolate()->factory()->the_hole_value());
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -2318,7 +2325,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -3060,38 +3067,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(ecx);
- __ pop(ebx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, eax, ebx, ecx);
- context()->Plug(eax);
-}
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3598,10 +3573,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmp(scratch, kStringTag | kOneByteStringTag | kSeqStringTag);
+ __ cmp(scratch, kStringTag | kAsciiStringTag | kSeqStringTag);
__ j(not_equal, &bailout);
__ add(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ add(index, Immediate(1));
__ cmp(index, array_length);
@@ -3630,15 +3605,14 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, Immediate(
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask));
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
__ cmp(scratch, ASCII_STRING_TYPE);
__ j(not_equal, &bailout);
// Add (separator length times array_length) - separator length
// to string_length.
__ mov(scratch, separator_operand);
- __ mov(scratch, FieldOperand(scratch, SeqOneByteString::kLengthOffset));
+ __ mov(scratch, FieldOperand(scratch, SeqAsciiString::kLengthOffset));
__ sub(string_length, scratch); // May be negative, temporarily.
__ imul(scratch, array_length_operand);
__ j(overflow, &bailout);
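The two steps above amount to the usual join-length formula, written out for an array of n strings and a fixed separator:

    //   string_length += sep_len * n - sep_len
    //                  = sum(len_i) + sep_len * (n - 1)
    // e.g. 3 strings of length 2 with a 1-char separator: 6 + 1 * 2 = 8.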
@@ -3652,11 +3626,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ mov(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
__ mov(string, separator_operand);
- __ cmp(FieldOperand(string, SeqOneByteString::kLengthOffset),
+ __ cmp(FieldOperand(string, SeqAsciiString::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3681,7 +3655,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
__ bind(&loop_1_condition);
@@ -3694,7 +3668,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ mov_b(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ mov_b(separator_operand, scratch);
__ Set(index, Immediate(0));
@@ -3722,7 +3696,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -3751,7 +3725,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ bind(&loop_3_entry);
@@ -3763,7 +3737,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
FieldOperand(string, String::kLengthOffset));
__ shr(string_length, 1);
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(string, result_pos, string_length, scratch);
__ add(index, Immediate(1));
@@ -4312,7 +4286,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
+ Condition cc = no_condition;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ pop(edx);
bool inline_smi_code = ShouldInlineSmiCase(op);
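After the switch picks a condition, the operands sit in edx (left, just popped) and eax (right, the accumulator), so the eventual compare is `cmp edx, eax` followed by a jump on `cc`. The mapping, summarized:

    //   Token::EQ, EQ_STRICT -> equal         (je  after cmp edx, eax)
    //   Token::LT            -> less          (jl)
    //   Token::GT            -> greater       (jg)
    //   Token::LTE           -> less_equal    (jle)
    //   Token::GTE           -> greater_equal (jge)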
diff --git a/deps/v8/src/ia32/ic-ia32.cc b/deps/v8/src/ia32/ic-ia32.cc
index ac5af2bf6..dae3bbd63 100644
--- a/deps/v8/src/ia32/ic-ia32.cc
+++ b/deps/v8/src/ia32/ic-ia32.cc
@@ -1715,7 +1715,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-bool CompareIC::HasInlinedSmiCode(Address address) {
+static bool HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1726,6 +1726,40 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
+}
+
+
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.cc b/deps/v8/src/ia32/lithium-codegen-ia32.cc
index e03f73323..32c66a05f 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.cc
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.cc
@@ -176,7 +176,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&do_not_pad);
}
- info()->set_prologue_offset(masm_->pc_offset());
__ push(ebp); // Caller's frame pointer.
__ mov(ebp, esp);
__ push(esi); // Callee's context.
@@ -294,30 +293,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
instr->CompileToNative(this);
}
}
@@ -1044,43 +1020,6 @@ void LCodeGen::DoModI(LModI* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ test(dividend, Operand(dividend));
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmp(dividend, kMinInt);
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = - divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ test(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sar(dividend, power);
- }
-
- if (divisor < 0) __ neg(dividend);
-
- return;
- }
-
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(eax));
ASSERT(ToRegister(instr->left()).is(eax));
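A sketch of the power-of-two fast path removed in this hunk (a 3.15-era optimization): the compiled code deoptimizes when the remainder check fails, so the C++ analogue asserts exactness. Everything here is illustrative; only the shift-and-negate shape matches the deleted assembly:

    #include <cassert>
    #include <cstdint>

    // Assumes divisor is a nonzero power of two (possibly negated).
    int32_t ExactDivByPowerOf2(int32_t dividend, int32_t divisor) {
      int32_t abs_div = divisor > 0 ? divisor : -divisor;
      int power = 0;
      while ((1 << power) != abs_div) ++power;   // WhichPowerOf2(abs_div)
      assert((dividend & (abs_div - 1)) == 0);   // nonzero remainder: deoptimize
      int32_t result = dividend >> power;        // arithmetic shift
      return divisor < 0 ? -result : result;
    }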
@@ -1106,7 +1045,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (kMinInt / -1).
+ // Check for (-kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmp(left_reg, kMinInt);
@@ -1361,13 +1300,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
ASSERT(ToRegister(right).is(ecx));
switch (instr->op()) {
- case Token::ROR:
- __ ror_cl(ToRegister(left));
- if (instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- }
- break;
case Token::SAR:
__ sar_cl(ToRegister(left));
break;
@@ -1389,14 +1321,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
- case Token::ROR:
- if (shift_count == 0 && instr->can_deopt()) {
- __ test(ToRegister(left), Immediate(0x80000000));
- DeoptimizeIf(not_zero, instr->environment());
- } else {
- __ ror(ToRegister(left), shift_count);
- }
- break;
case Token::SAR:
if (shift_count != 0) {
__ sar(ToRegister(left), shift_count);
@@ -1587,15 +1511,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -2834,71 +2749,33 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- LOperand* key = instr->key();
- if (!key->IsConstantOperand() &&
- ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
- elements_kind)) {
- __ SmiUntag(ToRegister(key));
- }
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- instr->hydrogen()->key()->representation(),
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movdbl(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsx_b(result, operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movzx_b(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsx_w(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzx_w(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ mov(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ mov(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ test(result, Operand(result));
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register result = ToRegister(instr->result());
+
+ // Load the result.
+ __ mov(result,
+ BuildFastArrayOperand(instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ test(result, Immediate(kSmiTagMask));
+ DeoptimizeIf(not_equal, instr->environment());
+ } else {
+ __ cmp(result, factory()->the_hole_value());
+ DeoptimizeIf(equal, instr->environment());
}
}
}
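The two hole checks restored above, restated: in a FAST_SMI array every valid element is a smi, so a set tag bit can only mean the hole; in a FAST object array the hole is an explicit sentinel to compare against. A rough C++ analogue (tag constant assumed, per ia32 smi encoding):

    const intptr_t kSmiTagMaskC = 1;  // assumed: clear low bit marks a smi

    bool SmiArraySlotIsHole(intptr_t raw) { return (raw & kSmiTagMaskC) != 0; }
    bool ObjArraySlotIsHole(const void* raw, const void* the_hole) {
      return raw == the_hole;
    }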
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
XMMRegister result = ToDoubleRegister(instr->result());
if (instr->hydrogen()->RequiresHoleCheck()) {
@@ -2925,42 +2802,6 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
}
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
-
- // Load the result.
- __ mov(result,
- BuildFastArrayOperand(instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ test(result, Immediate(kSmiTagMask));
- DeoptimizeIf(not_equal, instr->environment());
- } else {
- __ cmp(result, factory()->the_hole_value());
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
@@ -2970,7 +2811,7 @@ Operand LCodeGen::BuildFastArrayOperand(
uint32_t additional_index) {
Register elements_pointer_reg = ToRegister(elements_pointer);
int shift_size = ElementsKindToShiftSize(elements_kind);
- // Even though the HLoad/StoreKeyed instructions force the input
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
// representation for the key to be an integer, the input gets replaced during
// bound check elimination with the index argument to the bounds check, which
// can be tagged, so that case must be handled here, too.
@@ -2995,6 +2836,71 @@ Operand LCodeGen::BuildFastArrayOperand(
}
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand() &&
+ ExternalArrayOpRequiresTemp(instr->hydrogen()->key()->representation(),
+ elements_kind)) {
+ __ SmiUntag(ToRegister(key));
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ instr->hydrogen()->key()->representation(),
+ elements_kind,
+ 0,
+ instr->additional_index()));
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movdbl(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ movsx_b(result, operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movzx_b(result, operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ movsx_w(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzx_w(result, operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ mov(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ mov(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ test(result, Operand(result));
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
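Of the integer cases above, only EXTERNAL_UNSIGNED_INT can deoptimize: the element is a raw uint32 but the lithium result is a signed 32-bit register, so a set sign bit is unrepresentable unless the hydrogen value is explicitly typed kUint32. Sketch (Deoptimize is an assumed stand-in, not a V8 API):

    void Deoptimize();  // assumed bail-out hook

    int32_t LoadUint32(uint32_t raw, bool typed_uint32) {
      if (!typed_uint32 && (raw & 0x80000000u) != 0)
        Deoptimize();                       // value would read back negative
      return static_cast<int32_t>(raw);
    }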
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->context()).is(esi));
ASSERT(ToRegister(instr->object()).is(edx));
@@ -3647,16 +3553,6 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
@@ -3922,7 +3818,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
LOperand* key = instr->key();
if (!key->IsConstantOperand() &&
@@ -3931,7 +3828,7 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ SmiUntag(ToRegister(key));
}
Operand operand(BuildFastArrayOperand(
- instr->elements(),
+ instr->external_pointer(),
key,
instr->hydrogen()->key()->representation(),
elements_kind,
@@ -3975,39 +3872,13 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- XMMRegister value = ToDoubleRegister(instr->value());
-
- if (instr->NeedsCanonicalization()) {
- Label have_value;
-
- __ ucomisd(value, value);
- __ j(parity_odd, &have_value); // NaN.
-
- ExternalReference canonical_nan_reference =
- ExternalReference::address_of_canonical_non_hole_nan();
- __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
- __ bind(&have_value);
- }
-
- Operand double_store_operand = BuildFastArrayOperand(
- instr->elements(),
- instr->key(),
- instr->hydrogen()->key()->representation(),
- FAST_DOUBLE_ELEMENTS,
- FixedDoubleArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
- __ movdbl(double_store_operand, value);
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
+ Register elements = ToRegister(instr->object());
Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
Operand operand = BuildFastArrayOperand(
- instr->elements(),
+ instr->object(),
instr->key(),
instr->hydrogen()->key()->representation(),
FAST_ELEMENTS,
@@ -4032,15 +3903,30 @@ void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
}
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases...external, fast-double, fast
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ XMMRegister value = ToDoubleRegister(instr->value());
+
+ if (instr->NeedsCanonicalization()) {
+ Label have_value;
+
+ __ ucomisd(value, value);
+ __ j(parity_odd, &have_value); // NaN.
+
+ ExternalReference canonical_nan_reference =
+ ExternalReference::address_of_canonical_non_hole_nan();
+ __ movdbl(value, Operand::StaticVariable(canonical_nan_reference));
+ __ bind(&have_value);
}
+
+ Operand double_store_operand = BuildFastArrayOperand(
+ instr->elements(),
+ instr->key(),
+ instr->hydrogen()->key()->representation(),
+ FAST_DOUBLE_ELEMENTS,
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+ __ movdbl(double_store_operand, value);
}
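The ucomisd-with-itself trick above: comparing a value against itself is unordered (parity flag set) exactly when it is NaN, and `parity_odd` skips the canonicalization for ordinary numbers. The same predicate in C++:

    double CanonicalizeNaN(double v, double canonical_nan) {
      return (v != v) ? canonical_nan : v;  // v != v holds only for NaN
    }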
@@ -4482,7 +4368,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// Check for undefined. Undefined is converted to zero for truncating
// conversions.
__ cmp(input_reg, factory()->undefined_value());
- __ RecordComment("Deferred TaggedToI: cannot truncate");
DeoptimizeIf(not_equal, instr->environment());
__ mov(input_reg, 0);
__ jmp(&done, Label::kNear);
@@ -4503,7 +4388,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ j(less, &convert, Label::kNear);
// Pop FPU stack before deoptimizing.
__ fstp(0);
- __ RecordComment("Deferred TaggedToI: exponent too big");
DeoptimizeIf(no_condition, instr->environment());
// Reserve space for 64 bit answer.
@@ -4529,7 +4413,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
}
} else {
// Deoptimize if we don't have a heap number.
- __ RecordComment("Deferred TaggedToI: not a heap number");
DeoptimizeIf(not_equal, instr->environment());
XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
@@ -4537,16 +4420,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cvttsd2si(input_reg, Operand(xmm0));
__ cvtsi2sd(xmm_temp, Operand(input_reg));
__ ucomisd(xmm0, xmm_temp);
- __ RecordComment("Deferred TaggedToI: lost precision");
DeoptimizeIf(not_equal, instr->environment());
- __ RecordComment("Deferred TaggedToI: NaN");
DeoptimizeIf(parity_even, instr->environment()); // NaN.
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ test(input_reg, Operand(input_reg));
__ j(not_zero, &done);
__ movmskpd(input_reg, xmm0);
__ and_(input_reg, 1);
- __ RecordComment("Deferred TaggedToI: minus zero");
DeoptimizeIf(not_zero, instr->environment());
}
}
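The deopt conditions above reduce to a truncate-and-convert-back round trip, plus a sign-bit check for -0 (which round-trips to 0.0 and would otherwise slip through). A hedged C++ rendering; note the truncating cast is only well defined when the double is in int32 range:

    #include <cstdint>
    #include <cstring>

    bool DoubleIsInt32(double v, int32_t* out) {
      int32_t t = static_cast<int32_t>(v);            // cvttsd2si
      if (static_cast<double>(t) != v) return false;  // lost precision or NaN
      if (t == 0) {                                   // maybe -0: inspect sign bit
        uint64_t bits; std::memcpy(&bits, &v, sizeof bits);
        if (bits >> 63) return false;                 // movmskpd-style test
      }
      *out = t;
      return true;
    }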
@@ -4876,7 +4756,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
diff --git a/deps/v8/src/ia32/lithium-codegen-ia32.h b/deps/v8/src/ia32/lithium-codegen-ia32.h
index 44ddaffcd..6670024db 100644
--- a/deps/v8/src/ia32/lithium-codegen-ia32.h
+++ b/deps/v8/src/ia32/lithium-codegen-ia32.h
@@ -340,12 +340,6 @@ class LCodeGen BASE_EMBEDDED {
int* offset);
void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
// Emits code for pushing either a tagged constant, a (non-double)
// register, or a stack slot operand.
diff --git a/deps/v8/src/ia32/lithium-ia32.cc b/deps/v8/src/ia32/lithium-ia32.cc
index d7ac7a8b1..1d12d23d2 100644
--- a/deps/v8/src/ia32/lithium-ia32.cc
+++ b/deps/v8/src/ia32/lithium-ia32.cc
@@ -179,7 +179,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -299,11 +298,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LMathPowHalf::PrintDataTo(StringStream* stream) {
stream->Add("/pow_half ");
value()->PrintTo(stream);
@@ -413,27 +407,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
+ stream->Add("] <- ");
value()->PrintTo(stream);
}
@@ -1092,14 +1079,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(context,
input);
return DefineSameAsFirst(result);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
} else if (op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* context = UseFixed(instr->context(), esi);
LOperand* input = UseFixedDouble(instr->value(), xmm1);
@@ -1190,11 +1169,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1247,13 +1221,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
// The temporary operand is necessary to ensure that right is not allocated
// into edx.
LOperand* temp = FixedTemp(edx);
@@ -1485,7 +1452,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1650,17 +1617,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(ecx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), ecx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
return AssignEnvironment(new(zone()) LBoundsCheck(
UseRegisterOrConstantAtStart(instr->index()),
@@ -1787,9 +1743,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
+ LOperand* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
+ return AssignEnvironment(result);
}
@@ -1976,38 +1932,59 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* obj = UseRegisterAtStart(instr->object());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
-
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
+ : UseRegisterOrConstant(instr->key());
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+ LLoadKeyedSpecializedArrayElement* result =
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, make sure it
// has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS)
+ ? AssignEnvironment(load_instr)
+ : load_instr;
}
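Why `clobbers_key` forces a temp register: on ia32 a smi is the integer shifted left one bit, so untagging a key for external-array addressing is a single in-place arithmetic shift that destroys the tagged value. Sketch (1-bit smi tag assumed, as on ia32):

    int32_t SmiTag(int32_t value) { return value << 1; }
    int32_t SmiUntag(int32_t smi) { return smi >> 1; }  // the in-place sar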
@@ -2022,61 +1999,72 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- ASSERT(instr->key()->representation().IsInteger32() ||
- instr->key()->representation().IsTagged());
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* obj = UseRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- if (instr->value()->representation().IsDouble()) {
- LOperand* object = UseRegisterAtStart(instr->elements());
- LOperand* val = UseTempRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(object, key, val);
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
-
- LOperand* obj = UseRegister(instr->elements());
- LOperand* val = needs_write_barrier
- ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- LOperand* key = needs_write_barrier
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(obj, key, val);
- }
- }
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
ElementsKind elements_kind = instr->elements_kind();
- ASSERT(
+ ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- LOperand* external_pointer = UseRegister(instr->elements());
- // Determine if we need a byte register in this case for the value.
- bool val_is_fixed_register =
- elements_kind == EXTERNAL_BYTE_ELEMENTS ||
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* val = NULL;
+ if (elements_kind == EXTERNAL_BYTE_ELEMENTS ||
elements_kind == EXTERNAL_UNSIGNED_BYTE_ELEMENTS ||
- elements_kind == EXTERNAL_PIXEL_ELEMENTS;
-
- LOperand* val = val_is_fixed_register
- ? UseFixed(instr->value(), eax)
- : UseRegister(instr->value());
+ elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
+ // We need a byte register in this case for the value.
+ val = UseFixed(instr->value(), eax);
+ } else {
+ val = UseRegister(instr->value());
+ }
bool clobbers_key = ExternalArrayOpRequiresTemp(
instr->key()->representation(), elements_kind);
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- return new(zone()) LStoreKeyed(external_pointer,
- key,
- val);
+ : UseRegisterOrConstant(instr->key());
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
@@ -2331,7 +2319,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
+ for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
diff --git a/deps/v8/src/ia32/lithium-ia32.h b/deps/v8/src/ia32/lithium-ia32.h
index cf8537426..4643f95f4 100644
--- a/deps/v8/src/ia32/lithium-ia32.h
+++ b/deps/v8/src/ia32/lithium-ia32.h
@@ -119,13 +119,14 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastElement) \
+ V(LoadKeyedFastDoubleElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
- V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(MathPowHalf) \
@@ -144,7 +145,6 @@ class LCodeGen;
V(PushArgument) \
V(RegExpLiteral) \
V(Return) \
- V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -152,8 +152,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -616,7 +618,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->representation().IsDouble();
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -641,27 +643,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 2, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LMathPowHalf: public LTemplateInstruction<1, 2, 1> {
public:
LMathPowHalf(LOperand* context, LOperand* value, LOperand* temp) {
@@ -1180,30 +1161,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 2, 0> {
public:
LThrow(LOperand* context, LOperand* value) {
@@ -1432,26 +1389,37 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
- }
- bool is_external() const {
- return hydrogen()->is_external();
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
- virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1469,6 +1437,27 @@ inline static bool ExternalArrayOpRequiresTemp(
}
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
class LLoadKeyedGeneric: public LTemplateInstruction<1, 3, 0> {
public:
LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
@@ -2017,31 +2006,78 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 3, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
+ LStoreKeyedFastElement(LOperand* obj, LOperand* key, LOperand* val) {
inputs_[0] = obj;
inputs_[1] = key;
inputs_[2] = val;
}
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
+ LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = val;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
+
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* val) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = val;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
class LStoreKeyedGeneric: public LTemplateInstruction<0, 4, 0> {
public:
LStoreKeyedGeneric(LOperand* context,
@@ -2196,7 +2232,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.cc b/deps/v8/src/ia32/macro-assembler-ia32.cc
index 14fb8ca85..0d0bf0377 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/macro-assembler-ia32.cc
@@ -507,8 +507,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor,
- int elements_offset) {
+ bool specialize_for_processor) {
Label smi_value, done, maybe_nan, not_nan, is_nan, have_double_value;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -530,14 +529,12 @@ void MacroAssembler::StoreNumberToDoubleElements(
CpuFeatures::Scope use_sse2(SSE2);
movdbl(scratch2, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
+ movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
scratch2);
} else {
fld_d(FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
+ fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
}
jmp(&done);
@@ -564,15 +561,13 @@ void MacroAssembler::StoreNumberToDoubleElements(
if (CpuFeatures::IsSupported(SSE2) && specialize_for_processor) {
CpuFeatures::Scope fscope(SSE2);
cvtsi2sd(scratch2, scratch1);
- movdbl(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset),
+ movdbl(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize),
scratch2);
} else {
push(scratch1);
fild_s(Operand(esp, 0));
pop(scratch1);
- fstp_d(FieldOperand(elements, key, times_4,
- FixedDoubleArray::kHeaderSize - elements_offset));
+ fstp_d(FieldOperand(elements, key, times_4, FixedDoubleArray::kHeaderSize));
}
bind(&done);
}
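A detail worth noting in the `FieldOperand(elements, key, times_4, ...)` operands above: `key` still holds a smi (index << 1), so the times_4 scale actually addresses 8-byte doubles:

    //   address = elements + smi_key * 4 + (FixedDoubleArray::kHeaderSize - tag)
    //           = elements + (index << 1) * 4 + ...
    //           = elements + index * 8 + ...        // one double per slot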
@@ -1458,14 +1453,14 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
mov(scratch1, length);
ASSERT(kCharSize == 1);
add(scratch1, Immediate(kObjectAlignmentMask));
and_(scratch1, Immediate(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
+ AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
result,
@@ -1493,7 +1488,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
ASSERT(length > 0);
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::SizeFor(length),
+ AllocateInNewSpace(SeqAsciiString::SizeFor(length),
result,
scratch1,
scratch2,
@@ -1920,25 +1915,9 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
mov(edi, Operand::StaticVariable(limit_address));
add(Operand::StaticVariable(level_address), Immediate(1));
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// Call the api function.
call(function_address, RelocInfo::RUNTIME_ENTRY);
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, eax);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
if (!kReturnHandlesDirectly) {
// PrepareCallApiFunction saved pointer to the output slot into
// callee-save register esi.
@@ -2636,7 +2615,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
}
and_(scratch,
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask);
- cmp(scratch, kStringTag | kSeqStringTag | kOneByteStringTag);
+ cmp(scratch, kStringTag | kSeqStringTag | kAsciiStringTag);
j(not_equal, failure);
}
@@ -2659,17 +2638,15 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
// Check that both are flat ASCII strings.
- const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask
- | kStringEncodingMask | kAsciiDataHintTag;
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
// Interleave bits from both instance types and compare them in one check.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
and_(scratch1, kFlatAsciiStringMask);
and_(scratch2, kFlatAsciiStringMask);
- shl(scratch1, 8);
- or_(scratch1, scratch2);
- cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 8));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
+ cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
j(not_equal, failure);
}
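The `lea`/`cmp` pair above packs both masked instance types into one word: the ASSERT guarantees `mask & (mask << 3) == 0`, so after `a + b * 8` the two fields occupy disjoint bits (the add cannot carry) and one compare tests both strings. Equivalent C++, with mask and tag values left as parameters:

    bool BothFlatAscii(unsigned t1, unsigned t2, unsigned mask, unsigned tag) {
      unsigned combined = (t1 & mask) + ((t2 & mask) << 3);  // lea r,[a + b*8]
      return combined == (tag | (tag << 3));
    }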
@@ -2929,15 +2906,15 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
+ ASSERT(kAsciiStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
add(length, Immediate(0x04));
// Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
// by 2. If we multiply the string length as smi by this, it still
// won't overflow a 32-bit value.
- ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
- ASSERT(SeqOneByteString::kMaxSize <=
+ ASSERT_EQ(SeqAsciiString::kMaxSize, SeqTwoByteString::kMaxSize);
+ ASSERT(SeqAsciiString::kMaxSize <=
static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
imul(length, FieldOperand(value, String::kLengthOffset));
shr(length, 2 + kSmiTagSize + kSmiShiftSize);
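The and/xor/add sequence above turns the encoding bit directly into a scaled character size (kAsciiStringTag and kStringEncodingMask assumed to be 0x04, per the ASSERT):

    int CharSizeShifted2(int instance_type) {  // char size << 2
      int enc = instance_type & 0x04;          // 4 if ASCII, 0 if two-byte
      return (enc ^ 0x04) + 0x04;              // ASCII -> 4, two-byte -> 8
    }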
diff --git a/deps/v8/src/ia32/macro-assembler-ia32.h b/deps/v8/src/ia32/macro-assembler-ia32.h
index 7abb29b10..e48d0e75c 100644
--- a/deps/v8/src/ia32/macro-assembler-ia32.h
+++ b/deps/v8/src/ia32/macro-assembler-ia32.h
@@ -388,8 +388,7 @@ class MacroAssembler: public Assembler {
Register scratch1,
XMMRegister scratch2,
Label* fail,
- bool specialize_for_processor,
- int offset = 0);
+ bool specialize_for_processor);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
@@ -789,7 +788,6 @@ class MacroAssembler: public Assembler {
// Push a handle value.
void Push(Handle<Object> handle) { push(Immediate(handle)); }
- void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
diff --git a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
index af6a9e44b..622dc4254 100644
--- a/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
@@ -1197,7 +1197,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1228,7 +1228,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/ia32/stub-cache-ia32.cc b/deps/v8/src/ia32/stub-cache-ia32.cc
index c8695c572..f5e2d0589 100644
--- a/deps/v8/src/ia32/stub-cache-ia32.cc
+++ b/deps/v8/src/ia32/stub-cache-ia32.cc
@@ -376,23 +376,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index) {
- if (index.is_header_index()) {
- int offset = index.header_index() * kPointerSize;
+ int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
__ mov(dst, FieldOperand(src, offset));
} else {
- // Adjust for the number of properties stored in the holder.
- int slot = index.field_index() - holder->map()->inobject_properties();
- if (slot < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (slot * kPointerSize);
- __ mov(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = slot * kPointerSize + FixedArray::kHeaderSize;
- __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ mov(dst, FieldOperand(dst, offset));
- }
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ mov(dst, FieldOperand(dst, offset));
}
}
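The reverted GenerateFastPropertyLoad goes back to a plain int index. After subtracting the holder's in-object property count, a negative result addresses a slot inside the object itself, counting back from instance_size, while a non-negative result indexes the external properties FixedArray. A rough model of the offset arithmetic, with an assumed header size:

    // Sketch only: kFixedArrayHeaderSize is an assumed stand-in.
    const int kPointerSize = 4;
    const int kFixedArrayHeaderSize = 8;

    struct FieldLocation {
      bool in_object;  // offset is relative to the object vs. its properties
      int offset;
    };

    FieldLocation LocateField(int index, int inobject_properties,
                              int instance_size) {
      index -= inobject_properties;
      if (index < 0) {
        // In-object slots sit at the end of the object, so a negative index
        // counts backwards from instance_size.
        return { true, instance_size + index * kPointerSize };
      }
      // Overflow properties live in the separate properties array.
      return { false, index * kPointerSize + kFixedArrayHeaderSize };
    }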
@@ -1041,7 +1036,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1428,7 +1423,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -1523,7 +1518,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
+ Label attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ mov(edi, FieldOperand(edx, JSArray::kElementsOffset));
@@ -1531,7 +1526,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ cmp(FieldOperand(edi, HeapObject::kMapOffset),
Immediate(factory()->fixed_array_map()));
- __ j(not_equal, &check_double);
+ __ j(not_equal, &call_builtin);
// Get the array's length into eax and calculate new length.
__ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
@@ -1562,49 +1557,17 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ret((argc + 1) * kPointerSize);
- __ bind(&check_double);
-
-
- // Check that the elements are in double mode.
- __ cmp(FieldOperand(edi, HeapObject::kMapOffset),
- Immediate(factory()->fixed_double_array_map()));
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into eax and calculate new length.
- __ mov(eax, FieldOperand(edx, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ add(eax, Immediate(Smi::FromInt(argc)));
-
- // Get the elements' length into ecx.
- __ mov(ecx, FieldOperand(edi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmp(eax, ecx);
- __ j(greater, &call_builtin);
-
- __ mov(ecx, Operand(esp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- ecx, edi, eax, ecx, xmm0, &call_builtin, true, argc * kDoubleSize);
-
- // Save new length.
- __ mov(FieldOperand(edx, JSArray::kLengthOffset), eax);
- __ ret((argc + 1) * kPointerSize);
-
__ bind(&with_write_barrier);
__ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
- if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
+ if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
Label fast_object, not_fast_object;
__ CheckFastObjectElements(ebx, &not_fast_object, Label::kNear);
__ jmp(&fast_object);
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(ebx, &call_builtin);
- __ cmp(FieldOperand(ecx, HeapObject::kMapOffset),
- Immediate(factory()->heap_number_map()));
- __ j(equal, &call_builtin);
// edi: elements array
// edx: receiver
// ebx: map
@@ -2993,7 +2956,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- ecx : name
@@ -3193,7 +3156,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// ----------- S t a t e -------------
// -- ecx : key
// -- edx : receiver
@@ -3458,7 +3421,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#endif
// Load the initial map and verify that it is in fact a map.
- // edi: constructor
__ mov(ebx, FieldOperand(edi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ JumpIfSmi(ebx, &generic_stub_call);
@@ -3467,23 +3429,19 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#ifdef DEBUG
// Cannot construct functions this way.
+ // edi: constructor
// ebx: initial map
__ CmpInstanceType(ebx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
+ __ Assert(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject on the heap by moving the new space allocation
// top forward.
+ // edi: constructor
// ebx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
__ movzx_b(ecx, FieldOperand(ebx, Map::kInstanceSizeOffset));
__ shl(ecx, kPointerSizeLog2);
- __ cmp(ecx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, edx, ecx, no_reg,
+ __ AllocateInNewSpace(ecx, edx, ecx, no_reg,
&generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
@@ -3543,6 +3501,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -4353,22 +4312,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// ecx: key
// edx: receiver
// edi: elements
- // Initialize the new FixedDoubleArray.
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
__ mov(FieldOperand(edi, JSObject::kMapOffset),
Immediate(masm->isolate()->factory()->fixed_double_array_map()));
__ mov(FieldOperand(edi, FixedDoubleArray::kLengthOffset),
Immediate(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
- __ StoreNumberToDoubleElements(eax, edi, ecx, ebx, xmm0,
- &transition_elements_kind, true);
-
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ mov(FieldOperand(edi, offset), Immediate(kHoleNanLower32));
- __ mov(FieldOperand(edi, offset + kPointerSize),
- Immediate(kHoleNanUpper32));
- }
-
// Install the new backing store in the JSArray.
__ mov(FieldOperand(edx, JSObject::kElementsOffset), edi);
__ RecordWriteField(edx, JSObject::kElementsOffset, edi, ebx,
@@ -4378,7 +4328,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ add(FieldOperand(edx, JSArray::kLengthOffset),
Immediate(Smi::FromInt(1)));
__ mov(edi, FieldOperand(edx, JSObject::kElementsOffset));
- __ ret(0);
+ __ jmp(&finish_store);
__ bind(&check_capacity);
// eax: value
diff --git a/deps/v8/src/ic-inl.h b/deps/v8/src/ic-inl.h
index 77f409a1e..49b6ef9d0 100644
--- a/deps/v8/src/ic-inl.h
+++ b/deps/v8/src/ic-inl.h
@@ -43,8 +43,7 @@ Address IC::address() const {
Address result = Assembler::target_address_from_return_address(pc());
#ifdef ENABLE_DEBUGGER_SUPPORT
- ASSERT(Isolate::Current() == isolate());
- Debug* debug = isolate()->debug();
+ Debug* debug = Isolate::Current()->debug();
// First check if any break points are active if not just return the address
// of the call.
if (!debug->has_break_points()) return result;
diff --git a/deps/v8/src/ic.cc b/deps/v8/src/ic.cc
index bf2a649f7..dd0bb10e1 100644
--- a/deps/v8/src/ic.cc
+++ b/deps/v8/src/ic.cc
@@ -310,8 +310,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
if (FLAG_type_info_threshold == 0 && !FLAG_watch_ic_patching) {
return;
}
- Isolate* isolate = target->GetHeap()->isolate();
- Code* host = isolate->
+ Code* host = target->GetHeap()->isolate()->
inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
if (host->kind() != Code::FUNCTION) return;
@@ -334,7 +333,7 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
}
if (FLAG_watch_ic_patching) {
host->set_profiler_ticks(0);
- isolate->runtime_profiler()->NotifyICChanged();
+ Isolate::Current()->runtime_profiler()->NotifyICChanged();
}
// TODO(2029): When an optimized function is patched, it would
// be nice to propagate the corresponding type information to its
@@ -415,13 +414,11 @@ void KeyedStoreIC::Clear(Address address, Code* target) {
void CompareIC::Clear(Address address, Code* target) {
- ASSERT(target->major_key() == CodeStub::CompareIC);
- CompareIC::State handler_state;
- Token::Value op;
- ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
- &handler_state, &op);
+ // Only clear ICCompareStubs; we currently cannot clear generic CompareStubs.
+ if (target->major_key() != CodeStub::CompareIC) return;
// Only clear CompareICs that can retain objects.
- if (handler_state != KNOWN_OBJECTS) return;
+ if (target->compare_state() != KNOWN_OBJECTS) return;
+ Token::Value op = CompareIC::ComputeOperation(target);
SetTargetAtAddress(address, GetRawUninitialized(op));
PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
}
@@ -649,7 +646,7 @@ Handle<Code> CallICBase::ComputeMonomorphicStub(LookupResult* lookup,
Handle<JSObject> holder(lookup->holder());
switch (lookup->type()) {
case FIELD: {
- PropertyIndex index = lookup->GetFieldIndex();
+ int index = lookup->GetFieldIndex();
return isolate()->stub_cache()->ComputeCallField(
argc, kind_, extra_state, name, object, holder, index);
}
@@ -1380,11 +1377,6 @@ MaybeObject* StoreIC::Store(State state,
return *value;
}
- // Observed objects are always modified through the runtime.
- if (FLAG_harmony_observation && receiver->map()->is_observed()) {
- return receiver->SetProperty(*name, *value, NONE, strict_mode);
- }
-
// Use specialized code for setting the length of arrays with fast
// properties. Slow properties might indicate redefinition of the
// length property.
@@ -1470,9 +1462,11 @@ void StoreIC::UpdateCaches(LookupResult* lookup,
Handle<Code> code;
switch (type) {
case FIELD:
- code = isolate()->stub_cache()->ComputeStoreField(
- name, receiver, lookup->GetFieldIndex().field_index(),
- Handle<Map>::null(), strict_mode);
+ code = isolate()->stub_cache()->ComputeStoreField(name,
+ receiver,
+ lookup->GetFieldIndex(),
+ Handle<Map>::null(),
+ strict_mode);
break;
case NORMAL:
if (receiver->IsGlobalObject()) {
@@ -1908,8 +1902,7 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
// Update inline cache and stub cache.
- if (FLAG_use_ic && !receiver->IsJSGlobalProxy() &&
- !(FLAG_harmony_observation && receiver->map()->is_observed())) {
+ if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup(isolate());
if (LookupForWrite(receiver, name, &lookup)) {
UpdateCaches(&lookup, state, strict_mode, receiver, name, value);
@@ -1921,10 +1914,8 @@ MaybeObject* KeyedStoreIC::Store(State state,
}
// Do not use ICs for objects that require access checks (including
- // the global object), or are observed.
- bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded() &&
- !(FLAG_harmony_observation && object->IsJSObject() &&
- JSObject::cast(*object)->map()->is_observed());
+ // the global object).
+ bool use_ic = FLAG_use_ic && !object->IsAccessCheckNeeded();
ASSERT(!(use_ic && object->IsJSGlobalProxy()));
if (use_ic) {
@@ -1982,7 +1973,7 @@ void KeyedStoreIC::UpdateCaches(LookupResult* lookup,
switch (type) {
case FIELD:
code = isolate()->stub_cache()->ComputeKeyedStoreField(
- name, receiver, lookup->GetFieldIndex().field_index(),
+ name, receiver, lookup->GetFieldIndex(),
Handle<Map>::null(), strict_mode);
break;
case TRANSITION: {
@@ -2316,10 +2307,11 @@ const char* BinaryOpIC::GetName(TypeInfo type_info) {
switch (type_info) {
case UNINITIALIZED: return "Uninitialized";
case SMI: return "SMI";
- case INT32: return "Int32";
- case HEAP_NUMBER: return "HeapNumber";
+ case INT32: return "Int32s";
+ case HEAP_NUMBER: return "HeapNumbers";
case ODDBALL: return "Oddball";
- case STRING: return "String";
+ case BOTH_STRING: return "BothStrings";
+ case STRING: return "Strings";
case GENERIC: return "Generic";
default: return "Invalid";
}
@@ -2334,6 +2326,7 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
case INT32:
case HEAP_NUMBER:
case ODDBALL:
+ case BOTH_STRING:
case STRING:
return MONOMORPHIC;
case GENERIC:
@@ -2344,6 +2337,58 @@ BinaryOpIC::State BinaryOpIC::ToState(TypeInfo type_info) {
}
+BinaryOpIC::TypeInfo BinaryOpIC::JoinTypes(BinaryOpIC::TypeInfo x,
+ BinaryOpIC::TypeInfo y) {
+ if (x == UNINITIALIZED) return y;
+ if (y == UNINITIALIZED) return x;
+ if (x == y) return x;
+ if (x == BOTH_STRING && y == STRING) return STRING;
+ if (x == STRING && y == BOTH_STRING) return STRING;
+ if (x == STRING || x == BOTH_STRING || y == STRING || y == BOTH_STRING) {
+ return GENERIC;
+ }
+ if (x > y) return x;
+ return y;
+}
+
+
+BinaryOpIC::TypeInfo BinaryOpIC::GetTypeInfo(Handle<Object> left,
+ Handle<Object> right) {
+ ::v8::internal::TypeInfo left_type =
+ ::v8::internal::TypeInfo::TypeFromValue(left);
+ ::v8::internal::TypeInfo right_type =
+ ::v8::internal::TypeInfo::TypeFromValue(right);
+
+ if (left_type.IsSmi() && right_type.IsSmi()) {
+ return SMI;
+ }
+
+ if (left_type.IsInteger32() && right_type.IsInteger32()) {
+ // Platforms with 32-bit Smis have no distinct INT32 type.
+ if (kSmiValueSize == 32) return SMI;
+ return INT32;
+ }
+
+ if (left_type.IsNumber() && right_type.IsNumber()) {
+ return HEAP_NUMBER;
+ }
+
+ // Patching for fast string ADD makes sense even if only one of the
+ // arguments is a string.
+ if (left_type.IsString()) {
+ return right_type.IsString() ? BOTH_STRING : STRING;
+ } else if (right_type.IsString()) {
+ return STRING;
+ }
+
+ // Check for oddball objects.
+ if (left->IsUndefined() && right->IsNumber()) return ODDBALL;
+ if (left->IsNumber() && right->IsUndefined()) return ODDBALL;
+
+ return GENERIC;
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
ASSERT(args.length() == 4);
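JoinTypes above behaves as a least upper bound on a small lattice: UNINITIALIZED is the bottom, GENERIC is the top, the numeric states widen by enum order, and the string states only combine with each other (BOTH_STRING may weaken to STRING; any other mix involving strings jumps to GENERIC). A quick, self-contained way to sanity-check that reading, re-declaring the enum locally in the order the ic.h hunk below restores:

    // Sketch only: a local re-implementation for checking a few joins.
    #include <cassert>

    enum TypeInfo { UNINITIALIZED, SMI, INT32, HEAP_NUMBER, ODDBALL,
                    BOTH_STRING, STRING, GENERIC };

    TypeInfo Join(TypeInfo x, TypeInfo y) {
      if (x == UNINITIALIZED) return y;
      if (y == UNINITIALIZED) return x;
      if (x == y) return x;
      if (x == BOTH_STRING && y == STRING) return STRING;
      if (x == STRING && y == BOTH_STRING) return STRING;
      if (x == STRING || x == BOTH_STRING ||
          y == STRING || y == BOTH_STRING) return GENERIC;
      return x > y ? x : y;  // numeric states widen by enum order
    }

    int main() {
      assert(Join(SMI, INT32) == INT32);            // widen along the chain
      assert(Join(BOTH_STRING, STRING) == STRING);  // strings only weaken
      assert(Join(SMI, STRING) == GENERIC);         // mixed kinds hit the top
      return 0;
    }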
@@ -2395,72 +2440,25 @@ RUNTIME_FUNCTION(MaybeObject*, UnaryOp_Patch) {
return *result;
}
-
-static BinaryOpIC::TypeInfo TypeInfoFromValue(Handle<Object> value,
- Token::Value op) {
- ::v8::internal::TypeInfo type =
- ::v8::internal::TypeInfo::TypeFromValue(value);
- if (type.IsSmi()) return BinaryOpIC::SMI;
- if (type.IsInteger32()) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- if (type.IsNumber()) return BinaryOpIC::HEAP_NUMBER;
- if (type.IsString()) return BinaryOpIC::STRING;
- if (value->IsUndefined()) {
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- if (kSmiValueSize == 32) return BinaryOpIC::SMI;
- return BinaryOpIC::INT32;
- }
- return BinaryOpIC::ODDBALL;
- }
- return BinaryOpIC::GENERIC;
-}
-
-
-static BinaryOpIC::TypeInfo InputState(BinaryOpIC::TypeInfo old_type,
- Handle<Object> value,
- Token::Value op) {
- BinaryOpIC::TypeInfo new_type = TypeInfoFromValue(value, op);
- if (old_type == BinaryOpIC::STRING) {
- if (new_type == BinaryOpIC::STRING) return new_type;
- return BinaryOpIC::GENERIC;
- }
- return Max(old_type, new_type);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
- ASSERT(args.length() == 3);
+ ASSERT(args.length() == 5);
HandleScope scope(isolate);
Handle<Object> left = args.at<Object>(0);
Handle<Object> right = args.at<Object>(1);
int key = args.smi_at(2);
- Token::Value op = BinaryOpStub::decode_op_from_minor_key(key);
- BinaryOpIC::TypeInfo previous_left, previous_right, unused_previous_result;
- BinaryOpStub::decode_types_from_minor_key(
- key, &previous_left, &previous_right, &unused_previous_result);
+ Token::Value op = static_cast<Token::Value>(args.smi_at(3));
+ BinaryOpIC::TypeInfo previous_type =
+ static_cast<BinaryOpIC::TypeInfo>(args.smi_at(4));
- BinaryOpIC::TypeInfo new_left = InputState(previous_left, left, op);
- BinaryOpIC::TypeInfo new_right = InputState(previous_right, right, op);
+ BinaryOpIC::TypeInfo type = BinaryOpIC::GetTypeInfo(left, right);
+ type = BinaryOpIC::JoinTypes(type, previous_type);
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED;
-
- // STRING is only used for ADD operations.
- if ((new_left == BinaryOpIC::STRING || new_right == BinaryOpIC::STRING) &&
+ if ((type == BinaryOpIC::STRING || type == BinaryOpIC::BOTH_STRING) &&
op != Token::ADD) {
- new_left = new_right = BinaryOpIC::GENERIC;
+ type = BinaryOpIC::GENERIC;
}
-
- BinaryOpIC::TypeInfo new_overall = Max(new_left, new_right);
- BinaryOpIC::TypeInfo previous_overall = Max(previous_left, previous_right);
-
- if (new_overall == BinaryOpIC::SMI && previous_overall == BinaryOpIC::SMI) {
+ if (type == BinaryOpIC::SMI && previous_type == BinaryOpIC::SMI) {
if (op == Token::DIV ||
op == Token::MUL ||
op == Token::SHR ||
@@ -2475,35 +2473,26 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
result_type = BinaryOpIC::INT32;
}
}
- if (new_overall == BinaryOpIC::INT32 &&
- previous_overall == BinaryOpIC::INT32) {
- if (new_left == previous_left && new_right == previous_right) {
- result_type = BinaryOpIC::HEAP_NUMBER;
- }
+ if (type == BinaryOpIC::INT32 && previous_type == BinaryOpIC::INT32) {
+ // We must be here because an operation on two INT32 types overflowed.
+ result_type = BinaryOpIC::HEAP_NUMBER;
}
- BinaryOpStub stub(key, new_left, new_right, result_type);
+ BinaryOpStub stub(key, type, result_type);
Handle<Code> code = stub.GetCode();
if (!code.is_null()) {
-#ifdef DEBUG
if (FLAG_trace_ic) {
- PrintF("[BinaryOpIC in ");
- JavaScriptFrame::PrintTop(stdout, false, true);
- PrintF(" ((%s+%s)->((%s+%s)->%s))#%s @ %p]\n",
- BinaryOpIC::GetName(previous_left),
- BinaryOpIC::GetName(previous_right),
- BinaryOpIC::GetName(new_left),
- BinaryOpIC::GetName(new_right),
+ PrintF("[BinaryOpIC (%s->(%s->%s))#%s]\n",
+ BinaryOpIC::GetName(previous_type),
+ BinaryOpIC::GetName(type),
BinaryOpIC::GetName(result_type),
- Token::Name(op),
- static_cast<void*>(*code));
+ Token::Name(op));
}
-#endif
BinaryOpIC ic(isolate);
ic.patch(*code);
// Activate inlined smi code.
- if (previous_overall == BinaryOpIC::UNINITIALIZED) {
+ if (previous_type == BinaryOpIC::UNINITIALIZED) {
PatchInlinedSmiCode(ic.address(), ENABLE_INLINED_SMI_CHECK);
}
}
@@ -2566,28 +2555,43 @@ RUNTIME_FUNCTION(MaybeObject*, BinaryOp_Patch) {
Code* CompareIC::GetRawUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED);
Code* code = NULL;
- CHECK(stub.FindCodeInCache(&code, Isolate::Current()));
+ CHECK(stub.FindCodeInCache(&code));
return code;
}
Handle<Code> CompareIC::GetUninitialized(Token::Value op) {
- ICCompareStub stub(op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
+ ICCompareStub stub(op, UNINITIALIZED);
return stub.GetCode();
}
+CompareIC::State CompareIC::ComputeState(Code* target) {
+ int key = target->major_key();
+ if (key == CodeStub::Compare) return GENERIC;
+ ASSERT(key == CodeStub::CompareIC);
+ return static_cast<State>(target->compare_state());
+}
+
+
+Token::Value CompareIC::ComputeOperation(Code* target) {
+ ASSERT(target->major_key() == CodeStub::CompareIC);
+ return static_cast<Token::Value>(
+ target->compare_operation() + Token::EQ);
+}
+
+
const char* CompareIC::GetStateName(State state) {
switch (state) {
case UNINITIALIZED: return "UNINITIALIZED";
- case SMI: return "SMI";
- case HEAP_NUMBER: return "HEAP_NUMBER";
- case OBJECT: return "OBJECTS";
+ case SMIS: return "SMIS";
+ case HEAP_NUMBERS: return "HEAP_NUMBERS";
+ case OBJECTS: return "OBJECTS";
case KNOWN_OBJECTS: return "KNOWN_OBJECTS";
- case SYMBOL: return "SYMBOL";
- case STRING: return "STRING";
+ case SYMBOLS: return "SYMBOLS";
+ case STRINGS: return "STRINGS";
case GENERIC: return "GENERIC";
default:
UNREACHABLE();
@@ -2596,67 +2600,28 @@ const char* CompareIC::GetStateName(State state) {
}
-static CompareIC::State InputState(CompareIC::State old_state,
- Handle<Object> value) {
- switch (old_state) {
- case CompareIC::UNINITIALIZED:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
- if (value->IsSymbol()) return CompareIC::SYMBOL;
- if (value->IsString()) return CompareIC::STRING;
- if (value->IsJSObject()) return CompareIC::OBJECT;
- break;
- case CompareIC::SMI:
- if (value->IsSmi()) return CompareIC::SMI;
- if (value->IsHeapNumber()) return CompareIC::HEAP_NUMBER;
- break;
- case CompareIC::HEAP_NUMBER:
- if (value->IsNumber()) return CompareIC::HEAP_NUMBER;
- break;
- case CompareIC::SYMBOL:
- if (value->IsSymbol()) return CompareIC::SYMBOL;
- if (value->IsString()) return CompareIC::STRING;
- break;
- case CompareIC::STRING:
- if (value->IsSymbol() || value->IsString()) return CompareIC::STRING;
- break;
- case CompareIC::OBJECT:
- if (value->IsJSObject()) return CompareIC::OBJECT;
- break;
- case CompareIC::GENERIC:
- break;
- case CompareIC::KNOWN_OBJECTS:
- UNREACHABLE();
- break;
- }
- return CompareIC::GENERIC;
-}
-
-
-CompareIC::State CompareIC::TargetState(State old_state,
- State old_left,
- State old_right,
+CompareIC::State CompareIC::TargetState(State state,
bool has_inlined_smi_code,
Handle<Object> x,
Handle<Object> y) {
- switch (old_state) {
+ switch (state) {
case UNINITIALIZED:
- if (x->IsSmi() && y->IsSmi()) return SMI;
- if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBER;
+ if (x->IsSmi() && y->IsSmi()) return SMIS;
+ if (x->IsNumber() && y->IsNumber()) return HEAP_NUMBERS;
if (Token::IsOrderedRelationalCompareOp(op_)) {
// Ordered comparisons treat undefined as NaN, so the
// HEAP_NUMBER stub will do the right thing.
if ((x->IsNumber() && y->IsUndefined()) ||
(y->IsNumber() && x->IsUndefined())) {
- return HEAP_NUMBER;
+ return HEAP_NUMBERS;
}
}
if (x->IsSymbol() && y->IsSymbol()) {
// We compare symbols as strings if we need to determine
// the order in a non-equality compare.
- return Token::IsEqualityOp(op_) ? SYMBOL : STRING;
+ return Token::IsEqualityOp(op_) ? SYMBOLS : STRINGS;
}
- if (x->IsString() && y->IsString()) return STRING;
+ if (x->IsString() && y->IsString()) return STRINGS;
if (!Token::IsEqualityOp(op_)) return GENERIC;
if (x->IsJSObject() && y->IsJSObject()) {
if (Handle<JSObject>::cast(x)->map() ==
@@ -2664,70 +2629,30 @@ CompareIC::State CompareIC::TargetState(State old_state,
Token::IsEqualityOp(op_)) {
return KNOWN_OBJECTS;
} else {
- return OBJECT;
+ return OBJECTS;
}
}
return GENERIC;
- case SMI:
- return x->IsNumber() && y->IsNumber()
- ? HEAP_NUMBER
+ case SMIS:
+ return has_inlined_smi_code && x->IsNumber() && y->IsNumber()
+ ? HEAP_NUMBERS
: GENERIC;
- case SYMBOL:
+ case SYMBOLS:
ASSERT(Token::IsEqualityOp(op_));
- return x->IsString() && y->IsString() ? STRING : GENERIC;
- case HEAP_NUMBER:
- if (old_left == SMI && x->IsHeapNumber()) return HEAP_NUMBER;
- if (old_right == SMI && y->IsHeapNumber()) return HEAP_NUMBER;
- case STRING:
- case OBJECT:
+ return x->IsString() && y->IsString() ? STRINGS : GENERIC;
+ case HEAP_NUMBERS:
+ case STRINGS:
+ case OBJECTS:
case KNOWN_OBJECTS:
case GENERIC:
return GENERIC;
}
UNREACHABLE();
- return GENERIC; // Make the compiler happy.
-}
-
-
-void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
- HandleScope scope;
- State previous_left, previous_right, previous_state;
- ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
- &previous_right, &previous_state, NULL);
- State new_left = InputState(previous_left, x);
- State new_right = InputState(previous_right, y);
- State state = TargetState(previous_state, previous_left, previous_right,
- HasInlinedSmiCode(address()), x, y);
- ICCompareStub stub(op_, new_left, new_right, state);
- if (state == KNOWN_OBJECTS) {
- stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
- }
- set_target(*stub.GetCode());
-
-#ifdef DEBUG
- if (FLAG_trace_ic) {
- PrintF("[CompareIC in ");
- JavaScriptFrame::PrintTop(stdout, false, true);
- PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
- GetStateName(previous_left),
- GetStateName(previous_right),
- GetStateName(previous_state),
- GetStateName(new_left),
- GetStateName(new_right),
- GetStateName(state),
- Token::Name(op_),
- static_cast<void*>(*stub.GetCode()));
- }
-#endif
-
- // Activate inlined smi code.
- if (previous_state == UNINITIALIZED) {
- PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
- }
+ return GENERIC;
}
-// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
+// Used from ic_<arch>.cc.
RUNTIME_FUNCTION(Code*, CompareIC_Miss) {
NoHandleAllocation na;
ASSERT(args.length() == 3);
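The restored TargetState only ever widens: a SMIS stub may become HEAP_NUMBERS (and only when inlined smi code was actually patched), SYMBOLS may become STRINGS, and every other initialized state degrades straight to GENERIC, so a compare IC is repatched at most a couple of times. A condensed model of the post-initial transitions (the UNINITIALIZED operand classification is elided):

    // Sketch only: booleans stand in for the real operand checks.
    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, SYMBOLS, STRINGS,
                 OBJECTS, KNOWN_OBJECTS, GENERIC };

    State Widen(State state, bool both_numbers, bool both_strings,
                bool has_inlined_smi_code) {
      switch (state) {
        case SMIS:     // a smi compare saw a heap number
          return (has_inlined_smi_code && both_numbers) ? HEAP_NUMBERS
                                                        : GENERIC;
        case SYMBOLS:  // an equality-only symbol compare saw a plain string
          return both_strings ? STRINGS : GENERIC;
        default:       // all remaining initialized states are terminal
          return GENERIC;
      }
    }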
diff --git a/deps/v8/src/ic.h b/deps/v8/src/ic.h
index bfccd10a6..8767f988a 100644
--- a/deps/v8/src/ic.h
+++ b/deps/v8/src/ic.h
@@ -771,7 +771,8 @@ class BinaryOpIC: public IC {
INT32,
HEAP_NUMBER,
ODDBALL,
- STRING, // Only used for addition operation.
+ BOTH_STRING, // Only used for addition operation.
+ STRING, // Only used for addition operation. At least one string operand.
GENERIC
};
@@ -782,6 +783,10 @@ class BinaryOpIC: public IC {
static const char* GetName(TypeInfo type_info);
static State ToState(TypeInfo type_info);
+
+ static TypeInfo GetTypeInfo(Handle<Object> left, Handle<Object> right);
+
+ static TypeInfo JoinTypes(TypeInfo x, TypeInfo y);
};
@@ -789,11 +794,11 @@ class CompareIC: public IC {
public:
enum State {
UNINITIALIZED,
- SMI,
- HEAP_NUMBER,
- SYMBOL,
- STRING,
- OBJECT,
+ SMIS,
+ HEAP_NUMBERS,
+ SYMBOLS,
+ STRINGS,
+ OBJECTS,
KNOWN_OBJECTS,
GENERIC
};
@@ -804,27 +809,27 @@ class CompareIC: public IC {
// Update the inline cache for the given operands.
void UpdateCaches(Handle<Object> x, Handle<Object> y);
-
// Factory method for getting an uninitialized compare stub.
static Handle<Code> GetUninitialized(Token::Value op);
// Helper function for computing the condition for a compare operation.
static Condition ComputeCondition(Token::Value op);
+ // Helper function for determining the state of a compare IC.
+ static State ComputeState(Code* target);
+
+ // Helper function for determining the operation a compare IC is for.
+ static Token::Value ComputeOperation(Code* target);
+
static const char* GetStateName(State state);
private:
- static bool HasInlinedSmiCode(Address address);
-
- State TargetState(State old_state,
- State old_left,
- State old_right,
- bool has_inlined_smi_code,
- Handle<Object> x,
- Handle<Object> y);
+ State TargetState(State state, bool has_inlined_smi_code,
+ Handle<Object> x, Handle<Object> y);
bool strict() const { return op_ == Token::EQ_STRICT; }
Condition GetCondition() const { return ComputeCondition(op_); }
+ State GetState() { return ComputeState(target()); }
static Code* GetRawUninitialized(Token::Value op);
diff --git a/deps/v8/src/incremental-marking-inl.h b/deps/v8/src/incremental-marking-inl.h
index 1c30383d5..bbe9a9d20 100644
--- a/deps/v8/src/incremental-marking-inl.h
+++ b/deps/v8/src/incremental-marking-inl.h
@@ -37,27 +37,16 @@ namespace internal {
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
Object** slot,
Object* value) {
- HeapObject* value_heap_obj = HeapObject::cast(value);
- MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+ MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
if (Marking::IsWhite(value_bit)) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- if (chunk->IsLeftOfProgressBar(slot)) {
- WhiteToGreyAndPush(value_heap_obj, value_bit);
- RestartIfNotMarking();
- } else {
- return false;
- }
- } else {
- BlackToGreyAndUnshift(obj, obj_bit);
- RestartIfNotMarking();
- return false;
- }
- } else {
- return false;
+ BlackToGreyAndUnshift(obj, obj_bit);
+ RestartIfNotMarking();
}
+
+ // Object is either grey or white. It will be scanned if it survives.
+ return false;
}
if (!is_compacting_) return false;
MarkBit obj_bit = Marking::MarkBitFrom(obj);
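With the progress-bar plumbing reverted, BaseRecordWrite above is the plain tri-colour write barrier again: when a black (fully scanned) object is made to point at a white (not yet marked) object, the barrier pushes the black object back to grey so the marker revisits its fields. A compact sketch of that invariant, with colours in place of mark bits:

    // Sketch only: an enum stands in for V8's mark-bit encoding.
    enum Colour { WHITE, GREY, BLACK };

    struct Obj { Colour colour; };

    // Returns true if the slot should also be recorded for compaction,
    // mirroring the tail of BaseRecordWrite.
    bool RecordWrite(Obj* obj, Obj* value, bool is_compacting) {
      if (value->colour == WHITE && obj->colour == BLACK) {
        // A black object gained a white child: re-grey the parent so its
        // fields are rescanned before marking completes.
        obj->colour = GREY;
        return false;
      }
      return is_compacting && obj->colour == BLACK;
    }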
@@ -94,10 +83,6 @@ void IncrementalMarking::RecordWrites(HeapObject* obj) {
if (IsMarking()) {
MarkBit obj_bit = Marking::MarkBitFrom(obj);
if (Marking::IsBlack(obj_bit)) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- chunk->set_progress_bar(0);
- }
BlackToGreyAndUnshift(obj, obj_bit);
RestartIfNotMarking();
}
diff --git a/deps/v8/src/incremental-marking.cc b/deps/v8/src/incremental-marking.cc
index ef7dbe05f..e51d6c136 100644
--- a/deps/v8/src/incremental-marking.cc
+++ b/deps/v8/src/incremental-marking.cc
@@ -78,7 +78,7 @@ void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
- Object** slot,
+ Object* value,
Isolate* isolate) {
ASSERT(obj->IsHeapObject());
IncrementalMarking* marking = isolate->heap()->incremental_marking();
@@ -94,7 +94,7 @@ void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
MemoryChunk::kWriteBarrierCounterGranularity);
}
- marking->RecordWrite(obj, slot, *slot);
+ marking->RecordWrite(obj, NULL, value);
}
@@ -175,98 +175,13 @@ void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
}
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
- if (obj->IsHeapObject()) {
- HeapObject* heap_obj = HeapObject::cast(obj);
- MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
- if (Marking::IsBlack(mark_bit)) {
- MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
- -heap_obj->Size());
- }
- Marking::AnyToGrey(mark_bit);
- }
-}
-
-
-static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (mark_bit.Get()) return;
- mark_bit.Set();
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
- ASSERT(Marking::IsBlack(mark_bit));
-}
-
-
-static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
- MarkBit mark_bit,
- int size) {
- ASSERT(!Marking::IsImpossible(mark_bit));
- if (Marking::IsBlack(mark_bit)) return;
- Marking::MarkBlack(mark_bit);
- MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
- ASSERT(Marking::IsBlack(mark_bit));
-}
-
-
class IncrementalMarkingMarkingVisitor
: public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
public:
static void Initialize() {
StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
- table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
- table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
- table_.Register(kVisitJSRegExp, &VisitJSRegExp);
- }
-
- static const int kProgressBarScanningChunk = 32 * 1024;
- static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
- MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
- // TODO(mstarzinger): Move setting of the flag to the allocation site of
- // the array. The visitor should just check the flag.
- if (FLAG_use_marking_progress_bar &&
- chunk->owner()->identity() == LO_SPACE) {
- chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
- if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- Heap* heap = map->GetHeap();
- // When using a progress bar for large fixed arrays, scan only a chunk of
- // the array and try to push it onto the marking deque again until it is
- // fully scanned. Fall back to scanning it through to the end in case this
- // fails because of a full deque.
- int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
- int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
- chunk->progress_bar());
- int end_offset = Min(object_size,
- start_offset + kProgressBarScanningChunk);
- bool scan_until_end = false;
- do {
- VisitPointersWithAnchor(heap,
- HeapObject::RawField(object, 0),
- HeapObject::RawField(object, start_offset),
- HeapObject::RawField(object, end_offset));
- start_offset = end_offset;
- end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
- scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
- } while (scan_until_end && start_offset < object_size);
- chunk->set_progress_bar(start_offset);
- if (start_offset < object_size) {
- heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
- }
- } else {
- FixedArrayVisitor::Visit(map, object);
- }
- }
-
- static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
- Context* context = Context::cast(object);
-
- // We will mark cache black with a separate pass
- // when we finish marking.
- MarkObjectGreyDoNotEnqueue(context->normalized_map_cache());
- VisitNativeContext(map, context);
+ table_.Register(kVisitJSRegExp, &VisitJSRegExp);
}
static void VisitJSWeakMap(Map* map, HeapObject* object) {
@@ -296,25 +211,15 @@ class IncrementalMarkingMarkingVisitor
}
}
- INLINE(static void VisitPointersWithAnchor(Heap* heap,
- Object** anchor,
- Object** start,
- Object** end)) {
- for (Object** p = start; p < end; p++) {
- Object* obj = *p;
- if (obj->NonFailureIsHeapObject()) {
- heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
- MarkObject(heap, obj);
- }
- }
- }
-
// Marks the object grey and pushes it on the marking stack.
INLINE(static void MarkObject(Heap* heap, Object* obj)) {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+ if (heap->incremental_marking()->MarkBlackOrKeepGrey(mark_bit)) {
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+ heap_object->Size());
+ }
} else if (Marking::IsWhite(mark_bit)) {
heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
}
@@ -338,9 +243,10 @@ class IncrementalMarkingMarkingVisitor
class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
public:
- explicit IncrementalMarkingRootMarkingVisitor(
- IncrementalMarking* incremental_marking)
- : incremental_marking_(incremental_marking) {
+ IncrementalMarkingRootMarkingVisitor(Heap* heap,
+ IncrementalMarking* incremental_marking)
+ : heap_(heap),
+ incremental_marking_(incremental_marking) {
}
void VisitPointer(Object** p) {
@@ -359,7 +265,10 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
HeapObject* heap_object = HeapObject::cast(obj);
MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
if (mark_bit.data_only()) {
- MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+ if (incremental_marking_->MarkBlackOrKeepGrey(mark_bit)) {
+ MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+ heap_object->Size());
+ }
} else {
if (Marking::IsWhite(mark_bit)) {
incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
@@ -367,6 +276,7 @@ class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
}
}
+ Heap* heap_;
IncrementalMarking* incremental_marking_;
};
@@ -584,6 +494,19 @@ void IncrementalMarking::Start() {
}
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+ if (obj->IsHeapObject()) {
+ HeapObject* heap_obj = HeapObject::cast(obj);
+ MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
+ if (Marking::IsBlack(mark_bit)) {
+ MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+ -heap_obj->Size());
+ }
+ Marking::AnyToGrey(mark_bit);
+ }
+}
+
+
void IncrementalMarking::StartMarking(CompactionFlag flag) {
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Start marking\n");
@@ -627,7 +550,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
}
// Mark strong roots grey.
- IncrementalMarkingRootMarkingVisitor visitor(this);
+ IncrementalMarkingRootMarkingVisitor visitor(heap_, this);
heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
// Ready to start incremental marking.
@@ -683,11 +606,8 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
ASSERT(new_top != marking_deque_.bottom());
#ifdef DEBUG
MarkBit mark_bit = Marking::MarkBitFrom(obj);
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
+ (obj->IsFiller() && Marking::IsWhite(mark_bit)));
#endif
}
}
@@ -699,58 +619,6 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
}
-void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
- MarkBit map_mark_bit = Marking::MarkBitFrom(map);
- if (Marking::IsWhite(map_mark_bit)) {
- WhiteToGreyAndPush(map, map_mark_bit);
- }
-
- IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-
- MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#ifdef DEBUG
- MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
- SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
- (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
- (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
- Marking::IsBlack(mark_bit)));
-#endif
- MarkBlackOrKeepBlack(obj, mark_bit, size);
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
- Map* filler_map = heap_->one_pointer_filler_map();
- while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- int size = obj->SizeFromMap(map);
- bytes_to_process -= size;
- VisitObject(map, obj, size);
- }
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque() {
- Map* filler_map = heap_->one_pointer_filler_map();
- while (!marking_deque_.IsEmpty()) {
- HeapObject* obj = marking_deque_.Pop();
-
- // Explicitly skip one word fillers. Incremental markbit patterns are
- // correct only for objects that occupy at least two words.
- Map* map = obj->map();
- if (map == filler_map) continue;
-
- VisitObject(map, obj, obj->SizeFromMap(map));
- }
-}
-
-
void IncrementalMarking::Hurry() {
if (state() == MARKING) {
double start = 0.0;
@@ -760,7 +628,32 @@ void IncrementalMarking::Hurry() {
}
// TODO(gc) hurry can mark objects it encounters black as mutator
// was stopped.
- ProcessMarkingDeque();
+ Map* filler_map = heap_->one_pointer_filler_map();
+ Map* native_context_map = heap_->native_context_map();
+ while (!marking_deque_.IsEmpty()) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) {
+ continue;
+ } else if (map == native_context_map) {
+ // Native contexts have weak fields.
+ IncrementalMarkingMarkingVisitor::VisitNativeContext(map, obj);
+ } else {
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+ }
+
+ MarkBit mark_bit = Marking::MarkBitFrom(obj);
+ ASSERT(!Marking::IsBlack(mark_bit));
+ Marking::MarkBlack(mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+ }
state_ = COMPLETE;
if (FLAG_trace_incremental_marking) {
double end = OS::TimeCurrentMillis();
@@ -881,7 +774,7 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
// allocation), so to reduce the lumpiness we don't use the write barriers
// invoked since last step directly to determine the amount of work to do.
intptr_t bytes_to_process =
- marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
+ marking_speed_ * Max(allocated_, kWriteBarriersInvokedThreshold);
allocated_ = 0;
write_barriers_invoked_since_last_step_ = 0;
@@ -899,7 +792,43 @@ void IncrementalMarking::Step(intptr_t allocated_bytes,
StartMarking(PREVENT_COMPACTION);
}
} else if (state_ == MARKING) {
- ProcessMarkingDeque(bytes_to_process);
+ Map* filler_map = heap_->one_pointer_filler_map();
+ Map* native_context_map = heap_->native_context_map();
+ while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
+ HeapObject* obj = marking_deque_.Pop();
+
+ // Explicitly skip one word fillers. Incremental markbit patterns are
+ // correct only for objects that occupy at least two words.
+ Map* map = obj->map();
+ if (map == filler_map) continue;
+
+ int size = obj->SizeFromMap(map);
+ bytes_to_process -= size;
+ MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+ if (Marking::IsWhite(map_mark_bit)) {
+ WhiteToGreyAndPush(map, map_mark_bit);
+ }
+
+ // TODO(gc) switch to static visitor instead of normal visitor.
+ if (map == native_context_map) {
+ // Native contexts have weak fields.
+ Context* ctx = Context::cast(obj);
+
+ // We will mark cache black with a separate pass
+ // when we finish marking.
+ MarkObjectGreyDoNotEnqueue(ctx->normalized_map_cache());
+
+ IncrementalMarkingMarkingVisitor::VisitNativeContext(map, ctx);
+ } else {
+ IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+ }
+
+ MarkBit obj_mark_bit = Marking::MarkBitFrom(obj);
+ SLOW_ASSERT(Marking::IsGrey(obj_mark_bit) ||
+ (obj->IsFiller() && Marking::IsWhite(obj_mark_bit)));
+ Marking::MarkBlack(obj_mark_bit);
+ MemoryChunk::IncrementLiveBytesFromGC(obj->address(), size);
+ }
if (marking_deque_.IsEmpty()) MarkingComplete(action);
}
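The marking loop inlined back into Step is driven by a byte budget rather than wall-clock time: each step processes marking_speed_ times the larger of the bytes allocated since the last step and a fixed floor, and stops once the visited objects' sizes exhaust that budget. A stripped-down model of the budgeting, with a deque of object sizes standing in for the marking deque:

    // Sketch only: object sizes stand in for real heap objects.
    #include <cstdint>
    #include <deque>

    intptr_t StepBudget(intptr_t marking_speed, intptr_t allocated,
                        intptr_t floor_bytes) {
      return marking_speed * (allocated > floor_bytes ? allocated
                                                      : floor_bytes);
    }

    void Step(std::deque<intptr_t>* marking_deque, intptr_t bytes_to_process) {
      while (!marking_deque->empty() && bytes_to_process > 0) {
        intptr_t size = marking_deque->front();
        marking_deque->pop_front();
        bytes_to_process -= size;  // visiting an object spends its byte size
      }
    }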
diff --git a/deps/v8/src/incremental-marking.h b/deps/v8/src/incremental-marking.h
index fc5a978cc..1a86fcd44 100644
--- a/deps/v8/src/incremental-marking.h
+++ b/deps/v8/src/incremental-marking.h
@@ -127,7 +127,7 @@ class IncrementalMarking {
}
static void RecordWriteFromCode(HeapObject* obj,
- Object** slot,
+ Object* value,
Isolate* isolate);
static void RecordWriteForEvacuationFromCode(HeapObject* obj,
@@ -164,6 +164,19 @@ class IncrementalMarking {
inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+ // Does white->black or keeps grey or black color. Returns true if converting
+ // white to black.
+ inline bool MarkBlackOrKeepGrey(MarkBit mark_bit) {
+ ASSERT(!Marking::IsImpossible(mark_bit));
+ if (mark_bit.Get()) {
+ // Grey or black: Keep the color.
+ return false;
+ }
+ mark_bit.Set();
+ ASSERT(Marking::IsBlack(mark_bit));
+ return true;
+ }
+
inline int steps_count() {
return steps_count_;
}
@@ -246,12 +259,6 @@ class IncrementalMarking {
void EnsureMarkingDequeIsCommitted();
- INLINE(void ProcessMarkingDeque());
-
- INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
-
- INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
-
Heap* heap_;
State state_;
diff --git a/deps/v8/src/interface.cc b/deps/v8/src/interface.cc
index 1634a3711..336be82c6 100644
--- a/deps/v8/src/interface.cc
+++ b/deps/v8/src/interface.cc
@@ -170,8 +170,6 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
ASSERT(that->forward_ == NULL);
ASSERT(!this->IsValue());
ASSERT(!that->IsValue());
- ASSERT(this->index_ == -1);
- ASSERT(that->index_ == -1);
ASSERT(*ok);
#ifdef DEBUG
@@ -196,6 +194,15 @@ void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
return;
}
+ // Merge instance.
+ if (!that->instance_.is_null()) {
+ if (!this->instance_.is_null() && *this->instance_ != *that->instance_) {
+ *ok = false;
+ return;
+ }
+ this->instance_ = that->instance_;
+ }
+
// Merge interfaces.
this->flags_ |= that->flags_;
that->forward_ = this;
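The unification above is a textbook union-find: forward_ is the union link, Chase() walks it to the current representative, and DoUnify merges flags (and, with this change, the instance handle) into the surviving node before redirecting the other one at it. A bare-bones version of the pattern:

    // Sketch only: a minimal union-find with a merged payload, mirroring
    // the forward_/Chase()/DoUnify structure.
    struct Node {
      int flags = 0;
      Node* forward = nullptr;  // unification link

      Node* Chase() {
        Node* rep = this;
        while (rep->forward != nullptr) rep = rep->forward;
        return rep;
      }
    };

    void Unify(Node* a, Node* b) {
      Node* ra = a->Chase();
      Node* rb = b->Chase();
      if (ra == rb) return;    // already unified
      ra->flags |= rb->flags;  // merge payload into the representative
      rb->forward = ra;        // redirect the losing chain at the winner
    }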
@@ -220,7 +227,7 @@ void Interface::Print(int n) {
} else if (IsValue()) {
PrintF("value\n");
} else if (IsModule()) {
- PrintF("module %d %s{", Index(), IsFrozen() ? "" : "(unresolved) ");
+ PrintF("module %s{", IsFrozen() ? "" : "(unresolved) ");
ZoneHashMap* map = Chase()->exports_;
if (map == NULL || map->occupancy() == 0) {
PrintF("}\n");
diff --git a/deps/v8/src/interface.h b/deps/v8/src/interface.h
index f824a9a87..94ef11ba5 100644
--- a/deps/v8/src/interface.h
+++ b/deps/v8/src/interface.h
@@ -108,18 +108,18 @@ class Interface : public ZoneObject {
if (*ok) Chase()->flags_ |= MODULE;
}
+ // Set associated instance object.
+ void MakeSingleton(Handle<JSModule> instance, bool* ok) {
+ *ok = IsModule() && Chase()->instance_.is_null();
+ if (*ok) Chase()->instance_ = instance;
+ }
+
// Do not allow any further refinements, directly or through unification.
void Freeze(bool* ok) {
*ok = IsValue() || IsModule();
if (*ok) Chase()->flags_ |= FROZEN;
}
- // Assign an index.
- void Allocate(int index) {
- ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1);
- Chase()->index_ = index;
- }
-
// ---------------------------------------------------------------------------
// Accessors.
@@ -138,23 +138,7 @@ class Interface : public ZoneObject {
// Check whether this is closed (i.e. fully determined).
bool IsFrozen() { return Chase()->flags_ & FROZEN; }
- bool IsUnified(Interface* that) {
- return Chase() == that->Chase()
- || (this->IsValue() == that->IsValue() &&
- this->IsConst() == that->IsConst());
- }
-
- int Length() {
- ASSERT(IsModule() && IsFrozen());
- ZoneHashMap* exports = Chase()->exports_;
- return exports ? exports->occupancy() : 0;
- }
-
- // The context slot in the hosting global context pointing to this module.
- int Index() {
- ASSERT(IsModule() && IsFrozen());
- return Chase()->index_;
- }
+ Handle<JSModule> Instance() { return Chase()->instance_; }
// Look up an exported name. Returns NULL if not (yet) defined.
Interface* Lookup(Handle<String> name, Zone* zone);
@@ -210,13 +194,12 @@ class Interface : public ZoneObject {
int flags_;
Interface* forward_; // Unification link
ZoneHashMap* exports_; // Module exports and their types (allocated lazily)
- int index_;
+ Handle<JSModule> instance_;
explicit Interface(int flags)
: flags_(flags),
forward_(NULL),
- exports_(NULL),
- index_(-1) {
+ exports_(NULL) {
#ifdef DEBUG
if (FLAG_print_interface_details)
PrintF("# Creating %p\n", static_cast<void*>(this));
diff --git a/deps/v8/src/isolate.cc b/deps/v8/src/isolate.cc
index 15d0bdd44..75e15a454 100644
--- a/deps/v8/src/isolate.cc
+++ b/deps/v8/src/isolate.cc
@@ -426,6 +426,11 @@ char* Isolate::Iterate(ObjectVisitor* v, char* thread_storage) {
}
+void Isolate::IterateThread(ThreadVisitor* v) {
+ v->VisitThread(this, thread_local_top());
+}
+
+
void Isolate::IterateThread(ThreadVisitor* v, char* t) {
ThreadLocalTop* thread = reinterpret_cast<ThreadLocalTop*>(t);
v->VisitThread(this, thread);
@@ -548,97 +553,7 @@ void Isolate::PushStackTraceAndDie(unsigned int magic,
}
-// Determines whether the given stack frame should be displayed in
-// a stack trace. The caller is the error constructor that asked
-// for the stack trace to be collected. The first time a construct
-// call to this function is encountered it is skipped. The seen_caller
-// in/out parameter is used to remember if the caller has been seen
-// yet.
-static bool IsVisibleInStackTrace(StackFrame* raw_frame,
- Object* caller,
- bool* seen_caller) {
- // Only display JS frames.
- if (!raw_frame->is_java_script()) return false;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- Object* raw_fun = frame->function();
- // Not sure when this can happen but skip it just in case.
- if (!raw_fun->IsJSFunction()) return false;
- if ((raw_fun == caller) && !(*seen_caller)) {
- *seen_caller = true;
- return false;
- }
- // Skip all frames until we've seen the caller.
- if (!(*seen_caller)) return false;
- // Also, skip non-visible built-in functions and any call with the builtins
- // object as receiver, so as to not reveal either the builtins object or
- // an internal function.
- // The --builtins-in-stack-traces command line flag allows including
- // internal call sites in the stack trace for debugging purposes.
- if (!FLAG_builtins_in_stack_traces) {
- JSFunction* fun = JSFunction::cast(raw_fun);
- if (frame->receiver()->IsJSBuiltinsObject() ||
- (fun->IsBuiltin() && !fun->shared()->native())) {
- return false;
- }
- }
- return true;
-}
-
-
-Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
- Handle<Object> caller,
- int limit) {
- limit = Max(limit, 0); // Ensure that limit is not negative.
- int initial_size = Min(limit, 10);
- Handle<FixedArray> elements =
- factory()->NewFixedArrayWithHoles(initial_size * 4);
-
- // If the caller parameter is a function we skip frames until we're
- // under it before starting to collect.
- bool seen_caller = !caller->IsJSFunction();
- int cursor = 0;
- int frames_seen = 0;
- for (StackFrameIterator iter(this);
- !iter.done() && frames_seen < limit;
- iter.Advance()) {
- StackFrame* raw_frame = iter.frame();
- if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
- frames_seen++;
- JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
- // Set initial size to the maximum inlining level + 1 for the outermost
- // function.
- List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
- frame->Summarize(&frames);
- for (int i = frames.length() - 1; i >= 0; i--) {
- if (cursor + 4 > elements->length()) {
- int new_capacity = JSObject::NewElementsCapacity(elements->length());
- Handle<FixedArray> new_elements =
- factory()->NewFixedArrayWithHoles(new_capacity);
- for (int i = 0; i < cursor; i++) {
- new_elements->set(i, elements->get(i));
- }
- elements = new_elements;
- }
- ASSERT(cursor + 4 <= elements->length());
-
- Handle<Object> recv = frames[i].receiver();
- Handle<JSFunction> fun = frames[i].function();
- Handle<Code> code = frames[i].code();
- Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
- elements->set(cursor++, *recv);
- elements->set(cursor++, *fun);
- elements->set(cursor++, *code);
- elements->set(cursor++, *offset);
- }
- }
- }
- Handle<JSArray> result = factory()->NewJSArrayWithElements(elements);
- result->set_length(Smi::FromInt(cursor));
- return result;
-}
-
-
-void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
+void Isolate::CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object) {
if (capture_stack_trace_for_uncaught_exceptions_) {
// Capture stack trace for a detailed exception message.
Handle<String> key = factory()->hidden_stack_trace_symbol();
@@ -659,6 +574,8 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
Handle<String> column_key = factory()->LookupAsciiSymbol("column");
Handle<String> line_key = factory()->LookupAsciiSymbol("lineNumber");
Handle<String> script_key = factory()->LookupAsciiSymbol("scriptName");
+ Handle<String> name_or_source_url_key =
+ factory()->LookupAsciiSymbol("nameOrSourceURL");
Handle<String> script_name_or_source_url_key =
factory()->LookupAsciiSymbol("scriptNameOrSourceURL");
Handle<String> function_key = factory()->LookupAsciiSymbol("functionName");
@@ -718,7 +635,18 @@ Handle<JSArray> Isolate::CaptureCurrentStackTrace(
}
if (options & StackTrace::kScriptNameOrSourceURL) {
- Handle<Object> result = GetScriptNameOrSourceURL(script);
+ Handle<Object> script_name(script->name(), this);
+ Handle<JSValue> script_wrapper = GetScriptWrapper(script);
+ Handle<Object> property = GetProperty(script_wrapper,
+ name_or_source_url_key);
+ ASSERT(property->IsJSFunction());
+ Handle<JSFunction> method = Handle<JSFunction>::cast(property);
+ bool caught_exception;
+ Handle<Object> result = Execution::TryCall(method, script_wrapper, 0,
+ NULL, &caught_exception);
+ if (caught_exception) {
+ result = factory()->undefined_value();
+ }
CHECK_NOT_EMPTY_HANDLE(this,
JSObject::SetLocalPropertyIgnoreAttributes(
stack_frame, script_name_or_source_url_key,
@@ -995,28 +923,15 @@ const char* const Isolate::kStackOverflowMessage =
Failure* Isolate::StackOverflow() {
HandleScope scope;
- // At this point we cannot create an Error object using its javascript
- // constructor. Instead, we copy the pre-constructed boilerplate and
- // attach the stack trace as a hidden property.
Handle<String> key = factory()->stack_overflow_symbol();
Handle<JSObject> boilerplate =
Handle<JSObject>::cast(GetProperty(js_builtins_object(), key));
- Handle<JSObject> exception = Copy(boilerplate);
+ Handle<Object> exception = Copy(boilerplate);
+ // TODO(1240995): To avoid having to call JavaScript code to compute
+ // the message for stack overflow exceptions, which is very likely to
+ // double fault with another stack overflow exception, we use a
+ // precomputed message.
DoThrow(*exception, NULL);
-
- // Get stack trace limit.
- Handle<Object> error = GetProperty(js_builtins_object(), "$Error");
- if (!error->IsJSObject()) return Failure::Exception();
- Handle<Object> stack_trace_limit =
- GetProperty(Handle<JSObject>::cast(error), "stackTraceLimit");
- if (!stack_trace_limit->IsNumber()) return Failure::Exception();
- int limit = static_cast<int>(stack_trace_limit->Number());
-
- Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
- exception, factory()->undefined_value(), limit);
- JSObject::SetHiddenProperty(exception,
- factory()->hidden_stack_trace_symbol(),
- stack_trace);
return Failure::Exception();
}
@@ -1057,12 +972,9 @@ void Isolate::ScheduleThrow(Object* exception) {
// When scheduling a throw we first throw the exception to get the
// error reporting if it is uncaught before rescheduling it.
Throw(exception);
- PropagatePendingExceptionToExternalTryCatch();
- if (has_pending_exception()) {
- thread_local_top()->scheduled_exception_ = pending_exception();
- thread_local_top()->external_caught_exception_ = false;
- clear_pending_exception();
- }
+ thread_local_top()->scheduled_exception_ = pending_exception();
+ thread_local_top()->external_caught_exception_ = false;
+ clear_pending_exception();
}
@@ -1226,22 +1138,10 @@ void Isolate::DoThrow(Object* exception, MessageLocation* location) {
stack_trace_for_uncaught_exceptions_options_);
}
}
-
- Handle<Object> exception_arg = exception_handle;
- // If the exception argument is a custom object, turn it into a string
- // before throwing as uncaught exception. Note that the pending
- // exception object to be set later must not be turned into a string.
- if (exception_arg->IsJSObject() && !IsErrorObject(exception_arg)) {
- bool failed = false;
- exception_arg = Execution::ToDetailString(exception_arg, &failed);
- if (failed) {
- exception_arg = factory()->LookupAsciiSymbol("exception");
- }
- }
Handle<Object> message_obj = MessageHandler::MakeMessageObject(
"uncaught_exception",
location,
- HandleVector<Object>(&exception_arg, 1),
+ HandleVector<Object>(&exception_handle, 1),
stack_trace,
stack_trace_object);
thread_local_top()->pending_message_obj_ = *message_obj;
@@ -1364,24 +1264,6 @@ void Isolate::ReportPendingMessages() {
}
-MessageLocation Isolate::GetMessageLocation() {
- ASSERT(has_pending_exception());
-
- if (thread_local_top_.pending_exception_ != Failure::OutOfMemoryException() &&
- thread_local_top_.pending_exception_ != heap()->termination_exception() &&
- thread_local_top_.has_pending_message_ &&
- !thread_local_top_.pending_message_obj_->IsTheHole() &&
- thread_local_top_.pending_message_script_ != NULL) {
- Handle<Script> script(thread_local_top_.pending_message_script_);
- int start_pos = thread_local_top_.pending_message_start_pos_;
- int end_pos = thread_local_top_.pending_message_end_pos_;
- return MessageLocation(script, start_pos, end_pos);
- }
-
- return MessageLocation();
-}
-
-
void Isolate::TraceException(bool flag) {
FLAG_trace_exception = flag; // TODO(isolates): This is an unfortunate use.
}
@@ -2044,7 +1926,7 @@ bool Isolate::Init(Deserializer* des) {
// If we are deserializing, log non-function code objects and compiled
// functions found in the snapshot.
- if (!create_heap_objects &&
+ if (create_heap_objects &&
(FLAG_log_code || FLAG_ll_prof || logger_->is_logging_code_events())) {
HandleScope scope;
LOG(this, LogCodeObjects());
diff --git a/deps/v8/src/isolate.h b/deps/v8/src/isolate.h
index ac2e554f8..b90191d0e 100644
--- a/deps/v8/src/isolate.h
+++ b/deps/v8/src/isolate.h
@@ -354,7 +354,6 @@ typedef List<HeapObject*, PreallocatedStorageAllocationPolicy> DebugObjectCache;
V(uint64_t, enabled_cpu_features, 0) \
V(CpuProfiler*, cpu_profiler, NULL) \
V(HeapProfiler*, heap_profiler, NULL) \
- V(bool, observer_delivery_pending, false) \
ISOLATE_DEBUGGER_INIT_LIST(V)
class Isolate {
@@ -716,10 +715,7 @@ class Isolate {
int frame_limit,
StackTrace::StackTraceOptions options);
- Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object,
- Handle<Object> caller,
- int limit);
- void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
+ void CaptureAndSetCurrentStackTraceFor(Handle<JSObject> error_object);
// Returns if the top context may access the given global object. If
// the result is false, the pending exception is guaranteed to be
@@ -743,8 +739,6 @@ class Isolate {
Failure* ReThrow(MaybeObject* exception);
void ScheduleThrow(Object* exception);
void ReportPendingMessages();
- // Return the pending message location if any, or an unfilled structure.
- MessageLocation GetMessageLocation();
Failure* ThrowIllegalOperation();
// Promote a scheduled exception to pending. Asserts has_scheduled_exception.
@@ -770,6 +764,7 @@ class Isolate {
void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v, ThreadLocalTop* t);
char* Iterate(ObjectVisitor* v, char* t);
+ void IterateThread(ThreadVisitor* v);
void IterateThread(ThreadVisitor* v, char* t);
@@ -922,6 +917,10 @@ class Isolate {
bool fp_stubs_generated() { return fp_stubs_generated_; }
+ StaticResource<SafeStringInputBuffer>* compiler_safe_string_input_buffer() {
+ return &compiler_safe_string_input_buffer_;
+ }
+
Builtins* builtins() { return &builtins_; }
void NotifyExtensionInstalled() {
@@ -1228,6 +1227,7 @@ class Isolate {
ThreadManager* thread_manager_;
RuntimeState runtime_state_;
bool fp_stubs_generated_;
+ StaticResource<SafeStringInputBuffer> compiler_safe_string_input_buffer_;
Builtins builtins_;
bool has_installed_extensions_;
StringTracker* string_tracker_;
@@ -1394,7 +1394,12 @@ class StackLimitCheck BASE_EMBEDDED {
bool HasOverflowed() const {
StackGuard* stack_guard = isolate_->stack_guard();
- return (reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit());
+ // The stack has overflowed in C++ code only if the stack pointer exceeds
+ // the C++ stack guard and the limits are not set to interrupt values.
+ // TODO(214): Stack overflows are ignored if an interrupt is pending. This
+ // code should probably always use the initial C++ limit.
+ return (reinterpret_cast<uintptr_t>(this) < stack_guard->climit()) &&
+ stack_guard->IsStackOverflow();
}
private:
Isolate* isolate_;
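HasOverflowed above relies on the address of the StackLimitCheck object itself approximating the current stack pointer. A self-contained sketch of the same trick, assuming a downward-growing stack and an illustrative 512 KB budget (both assumptions, not V8 constants):

#include <cstdint>
#include <cstdio>

static std::uintptr_t g_stack_limit;

struct StackLimitCheckSketch {
  bool HasOverflowed() const {
    // The stack grows toward lower addresses on common platforms, so a
    // smaller `this` address means deeper recursion.
    return reinterpret_cast<std::uintptr_t>(this) < g_stack_limit;
  }
};

int Depth(int n) {
  StackLimitCheckSketch check;
  if (check.HasOverflowed()) return n;  // bail out instead of crashing
  volatile char pad[128];               // keep each frame on the stack
  pad[0] = 0;
  return Depth(n + 1) + (pad[0] ? 1 : 0);  // defeat tail-call optimization
}

int main() {
  char base;
  g_stack_limit = reinterpret_cast<std::uintptr_t>(&base) - 512 * 1024;
  std::printf("gave up at depth %d\n", Depth(0));
}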
diff --git a/deps/v8/src/json-parser.h b/deps/v8/src/json-parser.h
index 2f980cc05..40116fa59 100644
--- a/deps/v8/src/json-parser.h
+++ b/deps/v8/src/json-parser.h
@@ -58,7 +58,7 @@ class JsonParser BASE_EMBEDDED {
if (position_ >= source_length_) {
c0_ = kEndOfString;
} else if (seq_ascii) {
- c0_ = seq_source_->SeqOneByteStringGet(position_);
+ c0_ = seq_source_->SeqAsciiStringGet(position_);
} else {
c0_ = source_->Get(position_);
}
@@ -154,15 +154,13 @@ class JsonParser BASE_EMBEDDED {
inline Zone* zone() const { return zone_; }
static const int kInitialSpecialStringLength = 1024;
- static const int kPretenureTreshold = 100 * 1024;
private:
Handle<String> source_;
int source_length_;
- Handle<SeqOneByteString> seq_source_;
+ Handle<SeqAsciiString> seq_source_;
- PretenureFlag pretenure_;
Isolate* isolate_;
Factory* factory_;
Handle<JSFunction> object_constructor_;
@@ -176,17 +174,16 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
Zone* zone) {
isolate_ = source->map()->GetHeap()->isolate();
factory_ = isolate_->factory();
- object_constructor_ = Handle<JSFunction>(
- isolate()->native_context()->object_function(), isolate());
+ object_constructor_ =
+ Handle<JSFunction>(isolate()->native_context()->object_function());
zone_ = zone;
FlattenString(source);
source_ = source;
source_length_ = source_->length();
- pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
// Optimized fast case where we only have ASCII characters.
if (seq_ascii) {
- seq_source_ = Handle<SeqOneByteString>::cast(source_);
+ seq_source_ = Handle<SeqAsciiString>::cast(source_);
}
// Set initial position right before the string.
@@ -195,10 +192,8 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
AdvanceSkipWhitespace();
Handle<Object> result = ParseJsonValue();
if (result.is_null() || c0_ != kEndOfString) {
- // Some exception (for example stack overflow) is already pending.
- if (isolate_->has_pending_exception()) return Handle<Object>::null();
-
// Parse failed. Current character is the unexpected token.
+
const char* message;
Factory* factory = this->factory();
Handle<JSArray> array;
@@ -249,12 +244,6 @@ Handle<Object> JsonParser<seq_ascii>::ParseJson(Handle<String> source,
// Parse any JSON value.
template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
- StackLimitCheck stack_check(isolate_);
- if (stack_check.HasOverflowed()) {
- isolate_->StackOverflow();
- return Handle<Object>::null();
- }
-
if (c0_ == '"') return ParseJsonString();
if ((c0_ >= '0' && c0_ <= '9') || c0_ == '-') return ParseJsonNumber();
if (c0_ == '{') return ParseJsonObject();
@@ -292,7 +281,7 @@ template <bool seq_ascii>
Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Handle<Object> prototype;
Handle<JSObject> json_object =
- factory()->NewJSObject(object_constructor(), pretenure_);
+ factory()->NewJSObject(object_constructor());
ASSERT_EQ(c0_, '{');
AdvanceSkipWhitespace();
@@ -304,56 +293,45 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
Advance();
uint32_t index = 0;
- if (c0_ >= '0' && c0_ <= '9') {
- // Maybe an array index, try to parse it.
- if (c0_ == '0') {
- // With a leading zero, the string has to be "0" only to be an index.
- Advance();
- } else {
- do {
- int d = c0_ - '0';
- if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
- index = (index * 10) + d;
- Advance();
- } while (c0_ >= '0' && c0_ <= '9');
- }
-
- if (c0_ == '"') {
- // Successfully parsed index, parse and store element.
- AdvanceSkipWhitespace();
+ while (c0_ >= '0' && c0_ <= '9') {
+ int d = c0_ - '0';
+ if (index > 429496729U - ((d > 5) ? 1 : 0)) break;
+ index = (index * 10) + d;
+ Advance();
+ }
- if (c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
+ if (position_ != start_position + 1 && c0_ == '"') {
+ AdvanceSkipWhitespace();
- JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
- continue;
- }
- // Not an index, fallback to the slow path.
- }
+ if (c0_ != ':') return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
- position_ = start_position;
+ JSObject::SetOwnElement(json_object, index, value, kNonStrictMode);
+ } else {
+ position_ = start_position;
#ifdef DEBUG
- c0_ = '"';
+ c0_ = '"';
#endif
- Handle<String> key = ParseJsonSymbol();
- if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
+ Handle<String> key = ParseJsonSymbol();
+ if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
- AdvanceSkipWhitespace();
- Handle<Object> value = ParseJsonValue();
- if (value.is_null()) return ReportUnexpectedCharacter();
+ AdvanceSkipWhitespace();
+ Handle<Object> value = ParseJsonValue();
+ if (value.is_null()) return ReportUnexpectedCharacter();
- if (key->Equals(isolate()->heap()->Proto_symbol())) {
- prototype = value;
- } else {
- if (JSObject::TryTransitionToField(json_object, key)) {
- int index = json_object->LastAddedFieldIndex();
- json_object->FastPropertyAtPut(index, *value);
+ if (key->Equals(isolate()->heap()->Proto_symbol())) {
+ prototype = value;
} else {
- JSObject::SetLocalPropertyIgnoreAttributes(
- json_object, key, value, NONE);
+ if (JSObject::TryTransitionToField(json_object, key)) {
+ int index = json_object->LastAddedFieldIndex();
+ json_object->FastPropertyAtPut(index, *value);
+ } else {
+ JSObject::SetLocalPropertyIgnoreAttributes(
+ json_object, key, value, NONE);
+ }
}
}
} while (MatchSkipWhiteSpace(','));
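The digit loop above guards against uint32 overflow with the constant 429496729: UINT32_MAX is 4294967295, so a prefix can accept one more digit only if it is at most 429496729, and at exactly 429496729 the next digit must not exceed 5. A hedged standalone version of the same test (names are illustrative, not V8 API):

#include <cctype>
#include <cstdint>

// Parses a full decimal uint32, rejecting overflow exactly like the
// parser's array-index fast path.
bool ParseUint32(const char* p, std::uint32_t* out) {
  if (!std::isdigit(static_cast<unsigned char>(*p))) return false;
  std::uint32_t index = 0;
  for (; std::isdigit(static_cast<unsigned char>(*p)); p++) {
    std::uint32_t d = static_cast<std::uint32_t>(*p - '0');
    // index * 10 + d would exceed 4294967295 = 429496729 * 10 + 5.
    if (index > 429496729u - ((d > 5) ? 1 : 0)) return false;
    index = index * 10 + d;
  }
  if (*p != '\0') return false;
  *out = index;
  return true;
}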
@@ -387,12 +365,11 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
AdvanceSkipWhitespace();
// Allocate a fixed array with all the elements.
Handle<FixedArray> fast_elements =
- factory()->NewFixedArray(elements.length(), pretenure_);
+ factory()->NewFixedArray(elements.length());
for (int i = 0, n = elements.length(); i < n; i++) {
fast_elements->set(i, *elements[i]);
}
- return factory()->NewJSArrayWithElements(
- fast_elements, FAST_ELEMENTS, pretenure_);
+ return factory()->NewJSArrayWithElements(fast_elements);
}
@@ -459,7 +436,7 @@ Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
buffer.Dispose();
}
SkipWhitespace();
- return factory()->NewNumber(number, pretenure_);
+ return factory()->NewNumber(number);
}
@@ -472,27 +449,21 @@ inline void SeqStringSet(Handle<SeqTwoByteString> seq_str, int i, uc32 c) {
}
template <>
-inline void SeqStringSet(Handle<SeqOneByteString> seq_str, int i, uc32 c) {
- seq_str->SeqOneByteStringSet(i, c);
+inline void SeqStringSet(Handle<SeqAsciiString> seq_str, int i, uc32 c) {
+ seq_str->SeqAsciiStringSet(i, c);
}
template <typename StringType>
-inline Handle<StringType> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure);
+inline Handle<StringType> NewRawString(Factory* factory, int length);
template <>
-inline Handle<SeqTwoByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawTwoByteString(length, pretenure);
+inline Handle<SeqTwoByteString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawTwoByteString(length, NOT_TENURED);
}
template <>
-inline Handle<SeqOneByteString> NewRawString(Factory* factory,
- int length,
- PretenureFlag pretenure) {
- return factory->NewRawOneByteString(length, pretenure);
+inline Handle<SeqAsciiString> NewRawString(Factory* factory, int length) {
+ return factory->NewRawAsciiString(length, NOT_TENURED);
}
@@ -506,8 +477,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
int count = end - start;
int max_length = count + source_length_ - position_;
int length = Min(max_length, Max(kInitialSpecialStringLength, 2 * count));
- Handle<StringType> seq_str =
- NewRawString<StringType>(factory(), length, pretenure_);
+ Handle<StringType> seq_str = NewRawString<StringType>(factory(), length);
// Copy prefix into seq_str.
SinkChar* dest = seq_str->GetChars();
String::WriteToFlat(*prefix, dest, start, end);
@@ -530,7 +500,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
SeqStringSet(seq_str, count++, c0_);
Advance();
} else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str, 0, count);
}
} else {
@@ -570,7 +540,7 @@ Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
SeqStringSet(seq_str, count++, value);
break;
} else {
- // StringType is SeqOneByteString and we just read a non-ASCII char.
+ // StringType is SeqAsciiString and we just read a non-ASCII char.
position_ -= 6; // Rewind position_ to \ in \uxxxx.
Advance();
return SlowScanJsonString<SeqTwoByteString, uc16>(seq_str,
@@ -611,7 +581,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
Advance();
if (c0_ == '"') {
AdvanceSkipWhitespace();
- return factory()->empty_string();
+ return Handle<String>(isolate()->heap()->empty_string());
}
if (seq_ascii && is_symbol) {
@@ -626,7 +596,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
c0_ = c0;
int beg_pos = position_;
position_ = position;
- return SlowScanJsonString<SeqOneByteString, char>(source_,
+ return SlowScanJsonString<SeqAsciiString, char>(source_,
beg_pos,
position_);
}
@@ -634,7 +604,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
running_hash = StringHasher::AddCharacterCore(running_hash, c0);
position++;
if (position >= source_length_) return Handle<String>::null();
- c0 = seq_source_->SeqOneByteStringGet(position);
+ c0 = seq_source_->SeqAsciiStringGet(position);
} while (c0 != '"');
int length = position - position_;
uint32_t hash = (length <= String::kMaxHashCalcLength)
@@ -647,17 +617,17 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
uint32_t count = 1;
while (true) {
Object* element = symbol_table->KeyAt(entry);
- if (element == isolate()->heap()->undefined_value()) {
+ if (element == isolate()->heap()->raw_unchecked_undefined_value()) {
// Lookup failure.
break;
}
- if (element != isolate()->heap()->the_hole_value() &&
+ if (element != isolate()->heap()->raw_unchecked_the_hole_value() &&
String::cast(element)->IsAsciiEqualTo(string_vector)) {
// Lookup success, update the current position.
position_ = position;
// Advance past the last '"'.
AdvanceSkipWhitespace();
- return Handle<String>(String::cast(element), isolate());
+ return Handle<String>(String::cast(element));
}
entry = SymbolTable::NextProbe(entry, count++, capacity);
}
@@ -677,7 +647,7 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
position_);
}
} else {
- return SlowScanJsonString<SeqOneByteString, char>(source_,
+ return SlowScanJsonString<SeqAsciiString, char>(source_,
beg_pos,
position_);
}
@@ -686,11 +656,11 @@ Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
Handle<String> result;
if (seq_ascii && is_symbol) {
result = factory()->LookupAsciiSymbol(seq_source_,
- beg_pos,
- length);
+ beg_pos,
+ length);
} else {
- result = factory()->NewRawOneByteString(length, pretenure_);
- char* dest = SeqOneByteString::cast(*result)->GetChars();
+ result = factory()->NewRawAsciiString(length);
+ char* dest = SeqAsciiString::cast(*result)->GetChars();
String::WriteToFlat(*source_, dest, beg_pos, position_);
}
ASSERT_EQ('"', c0_);
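The symbol-table loop above is classic open addressing: hash once, then step by an increasing probe count modulo a power-of-two capacity until the key or an empty slot is found. A generic sketch under the same assumptions (power-of-two capacity, table never completely full, as a load-factor rule would guarantee):

#include <cstdint>
#include <string>
#include <vector>

struct Slot { bool used = false; std::string key; };

// Returns the slot index, or -1 on lookup failure. Stepping by an
// increasing count visits every slot when capacity is a power of two.
int Lookup(const std::vector<Slot>& table, std::uint32_t hash,
           const std::string& key) {
  std::uint32_t capacity = static_cast<std::uint32_t>(table.size());
  std::uint32_t entry = hash & (capacity - 1);
  for (std::uint32_t count = 1;; count++) {
    if (!table[entry].used) return -1;  // empty slot: lookup failure
    if (table[entry].key == key) return static_cast<int>(entry);
    entry = (entry + count) & (capacity - 1);  // next probe
  }
}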
diff --git a/deps/v8/src/json-stringifier.h b/deps/v8/src/json-stringifier.h
deleted file mode 100644
index 7a8af30eb..000000000
--- a/deps/v8/src/json-stringifier.h
+++ /dev/null
@@ -1,748 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef V8_JSON_STRINGIFIER_H_
-#define V8_JSON_STRINGIFIER_H_
-
-#include "v8.h"
-#include "v8utils.h"
-#include "v8conversions.h"
-
-namespace v8 {
-namespace internal {
-
-class BasicJsonStringifier BASE_EMBEDDED {
- public:
- explicit BasicJsonStringifier(Isolate* isolate);
-
- MaybeObject* Stringify(Handle<Object> object);
-
- private:
- static const int kInitialPartLength = 32;
- static const int kMaxPartLength = 16 * 1024;
- static const int kPartLengthGrowthFactor = 2;
-
- enum Result { UNCHANGED, SUCCESS, EXCEPTION, CIRCULAR, STACK_OVERFLOW };
-
- void Extend();
-
- void ChangeEncoding();
-
- void ShrinkCurrentPart();
-
- template <bool is_ascii, typename Char>
- INLINE(void Append_(Char c));
-
- template <bool is_ascii, typename Char>
- INLINE(void Append_(const Char* chars));
-
- INLINE(void Append(char c)) {
- if (is_ascii_) {
- Append_<true>(c);
- } else {
- Append_<false>(c);
- }
- }
-
- INLINE(void Append(const char* chars)) {
- if (is_ascii_) {
- Append_<true>(chars);
- } else {
- Append_<false>(chars);
- }
- }
-
- Handle<Object> ApplyToJsonFunction(Handle<Object> object,
- Handle<Object> key);
-
- Result SerializeGeneric(Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key);
-
- // Entry point to serialize the object.
- INLINE(Result SerializeObject(Handle<Object> obj)) {
- return Serialize_<false>(obj, false, factory_->empty_string());
- }
-
- // Serialize an array element.
- // The index may serve as argument for the toJSON function.
- INLINE(Result SerializeElement(Handle<Object> object, int i)) {
- return Serialize_<false>(object, false, Handle<Object>(Smi::FromInt(i)));
- }
-
- // Serialize an object property.
- // The key may or may not be serialized depending on the property.
- // The key may also serve as argument for the toJSON function.
- INLINE(Result SerializeProperty(Handle<Object> object,
- bool deferred_comma,
- Handle<String> deferred_key)) {
- ASSERT(!deferred_key.is_null());
- return Serialize_<true>(object, deferred_comma, deferred_key);
- }
-
- template <bool deferred_string_key>
- Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
-
- void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
- if (deferred_comma) Append(',');
- SerializeString(Handle<String>::cast(deferred_key));
- Append(':');
- }
-
- Result SerializeSmi(Smi* object);
-
- Result SerializeDouble(double number);
- INLINE(Result SerializeHeapNumber(Handle<HeapNumber> object)) {
- return SerializeDouble(object->value());
- }
-
- Result SerializeJSValue(Handle<JSValue> object);
-
- INLINE(Result SerializeJSArray(Handle<JSArray> object));
- INLINE(Result SerializeJSObject(Handle<JSObject> object));
-
- Result SerializeJSArraySlow(Handle<JSArray> object, int length);
-
- void SerializeString(Handle<String> object);
-
- template <typename SrcChar, typename DestChar>
- INLINE(void SerializeStringUnchecked_(const SrcChar* src,
- DestChar* dest,
- int length));
-
- template <bool is_ascii, typename Char>
- INLINE(void SerializeString_(Handle<String> string));
-
- template <typename Char>
- INLINE(bool DoNotEscape(Char c));
-
- template <typename Char>
- INLINE(Vector<const Char> GetCharVector(Handle<String> string));
-
- Result StackPush(Handle<Object> object);
- void StackPop();
-
- INLINE(Handle<String> accumulator()) {
- return Handle<String>(String::cast(accumulator_store_->value()), isolate_);
- }
-
- INLINE(void set_accumulator(Handle<String> string)) {
- return accumulator_store_->set_value(*string);
- }
-
- Isolate* isolate_;
- Factory* factory_;
- // We use a value wrapper for the string accumulator to keep the
- // (indirect) handle to it in the outermost handle scope.
- Handle<JSValue> accumulator_store_;
- Handle<String> current_part_;
- Handle<String> tojson_symbol_;
- Handle<JSArray> stack_;
- int current_index_;
- int part_length_;
- bool is_ascii_;
-
- static const int kJsonEscapeTableEntrySize = 8;
- static const char* const JsonEscapeTable;
-};
-
-
-// Translation table to escape ASCII characters.
-// Table entries start at a multiple of 8 and are null-terminated.
-const char* const BasicJsonStringifier::JsonEscapeTable =
- "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
- "\\u0004\0 \\u0005\0 \\u0006\0 \\u0007\0 "
- "\\b\0 \\t\0 \\n\0 \\u000b\0 "
- "\\f\0 \\r\0 \\u000e\0 \\u000f\0 "
- "\\u0010\0 \\u0011\0 \\u0012\0 \\u0013\0 "
- "\\u0014\0 \\u0015\0 \\u0016\0 \\u0017\0 "
- "\\u0018\0 \\u0019\0 \\u001a\0 \\u001b\0 "
- "\\u001c\0 \\u001d\0 \\u001e\0 \\u001f\0 "
- " \0 !\0 \\\"\0 #\0 "
- "$\0 %\0 &\0 '\0 "
- "(\0 )\0 *\0 +\0 "
- ",\0 -\0 .\0 /\0 "
- "0\0 1\0 2\0 3\0 "
- "4\0 5\0 6\0 7\0 "
- "8\0 9\0 :\0 ;\0 "
- "<\0 =\0 >\0 ?\0 "
- "@\0 A\0 B\0 C\0 "
- "D\0 E\0 F\0 G\0 "
- "H\0 I\0 J\0 K\0 "
- "L\0 M\0 N\0 O\0 "
- "P\0 Q\0 R\0 S\0 "
- "T\0 U\0 V\0 W\0 "
- "X\0 Y\0 Z\0 [\0 "
- "\\\\\0 ]\0 ^\0 _\0 "
- "`\0 a\0 b\0 c\0 "
- "d\0 e\0 f\0 g\0 "
- "h\0 i\0 j\0 k\0 "
- "l\0 m\0 n\0 o\0 "
- "p\0 q\0 r\0 s\0 "
- "t\0 u\0 v\0 w\0 "
- "x\0 y\0 z\0 {\0 "
- "|\0 }\0 ~\0 \177\0 ";
-
-
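Each entry in the table above occupies a fixed 8-byte slot (kJsonEscapeTableEntrySize) and is NUL-terminated, so the escape lookup is a single multiply with no branching on character class. A hedged sketch of the consuming side, assuming ASCII input (c < 0x80, the table's range):

#include <string>

std::string EscapeWithTable(const char* table, char c) {
  const int kEntrySize = 8;  // mirrors kJsonEscapeTableEntrySize
  const char* entry = &table[static_cast<unsigned char>(c) * kEntrySize];
  std::string out;
  while (*entry != '\0') out += *entry++;  // copy up to the terminator
  return out;
}

Characters that need no escaping simply map to a one-character entry, so the same lookup handles both cases.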
-BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
- : isolate_(isolate), current_index_(0), is_ascii_(true) {
- factory_ = isolate_->factory();
- accumulator_store_ = Handle<JSValue>::cast(
- factory_->ToObject(factory_->empty_string()));
- part_length_ = kInitialPartLength;
- current_part_ = factory_->NewRawOneByteString(kInitialPartLength);
- tojson_symbol_ = factory_->LookupAsciiSymbol("toJSON");
- stack_ = factory_->NewJSArray(8);
-}
-
-
-MaybeObject* BasicJsonStringifier::Stringify(Handle<Object> object) {
- switch (SerializeObject(object)) {
- case UNCHANGED:
- return isolate_->heap()->undefined_value();
- case SUCCESS:
- ShrinkCurrentPart();
- return *factory_->NewConsString(accumulator(), current_part_);
- case CIRCULAR:
- return isolate_->Throw(*factory_->NewTypeError(
- "circular_structure", HandleVector<Object>(NULL, 0)));
- case STACK_OVERFLOW:
- return isolate_->StackOverflow();
- default:
- return Failure::Exception();
- }
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::Append_(Char c) {
- if (is_ascii) {
- SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
- current_index_++, c);
- } else {
- SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
- current_index_++, c);
- }
- if (current_index_ == part_length_) Extend();
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::Append_(const Char* chars) {
- for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
-}
-
-
-Handle<Object> BasicJsonStringifier::ApplyToJsonFunction(
- Handle<Object> object, Handle<Object> key) {
- LookupResult lookup(isolate_);
- JSObject::cast(*object)->LookupRealNamedProperty(*tojson_symbol_, &lookup);
- if (!lookup.IsProperty()) return object;
- PropertyAttributes attr;
- Handle<Object> fun =
- Object::GetProperty(object, object, &lookup, tojson_symbol_, &attr);
- if (!fun->IsJSFunction()) return object;
-
- // Call toJSON function.
- if (key->IsSmi()) key = factory_->NumberToString(key);
- Handle<Object> argv[] = { key };
- bool has_exception = false;
- HandleScope scope(isolate_);
- object = Execution::Call(fun, object, 1, argv, &has_exception);
- // Return empty handle to signal an exception.
- if (has_exception) return Handle<Object>::null();
- return scope.CloseAndEscape(object);
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
- Handle<Object> object) {
- StackLimitCheck check(isolate_);
- if (check.HasOverflowed()) return STACK_OVERFLOW;
-
- int length = Smi::cast(stack_->length())->value();
- FixedArray* elements = FixedArray::cast(stack_->elements());
- for (int i = 0; i < length; i++) {
- if (elements->get(i) == *object) {
- return CIRCULAR;
- }
- }
- stack_->EnsureSize(length + 1);
- FixedArray::cast(stack_->elements())->set(length, *object);
- stack_->set_length(Smi::FromInt(length + 1));
- return SUCCESS;
-}
-
-
-void BasicJsonStringifier::StackPop() {
- int length = Smi::cast(stack_->length())->value();
- stack_->set_length(Smi::FromInt(length - 1));
-}
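StackPush/StackPop above detect circular structures by keeping every object currently being serialized on a stack and scanning it linearly before each push; the scan stays cheap because nesting depth is small in practice. A standalone sketch of the same bookkeeping:

#include <vector>

enum class PushResult { kSuccess, kCircular };

class CycleStack {
 public:
  PushResult Push(const void* object) {
    for (const void* seen : stack_) {
      if (seen == object) return PushResult::kCircular;  // already open
    }
    stack_.push_back(object);
    return PushResult::kSuccess;
  }
  void Pop() { stack_.pop_back(); }

 private:
  std::vector<const void*> stack_;  // objects currently being serialized
};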
-
-
-template <bool deferred_string_key>
-BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
- Handle<Object> object, bool comma, Handle<Object> key) {
- if (object->IsJSObject()) {
- object = ApplyToJsonFunction(object, key);
- if (object.is_null()) return EXCEPTION;
- }
-
- if (object->IsSmi()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeSmi(Smi::cast(*object));
- }
-
- switch (HeapObject::cast(*object)->map()->instance_type()) {
- case HEAP_NUMBER_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
- case ODDBALL_TYPE:
- switch (Oddball::cast(*object)->kind()) {
- case Oddball::kFalse:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("false");
- return SUCCESS;
- case Oddball::kTrue:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("true");
- return SUCCESS;
- case Oddball::kNull:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- Append("null");
- return SUCCESS;
- default:
- return UNCHANGED;
- }
- case JS_ARRAY_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSArray(Handle<JSArray>::cast(object));
- case JS_VALUE_TYPE:
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSValue(Handle<JSValue>::cast(object));
- case JS_FUNCTION_TYPE:
- return UNCHANGED;
- default:
- if (object->IsString()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- SerializeString(Handle<String>::cast(object));
- return SUCCESS;
- } else if (object->IsJSObject()) {
- if (deferred_string_key) SerializeDeferredKey(comma, key);
- return SerializeJSObject(Handle<JSObject>::cast(object));
- } else {
- return SerializeGeneric(object, key, comma, deferred_string_key);
- }
- }
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
- Handle<Object> object,
- Handle<Object> key,
- bool deferred_comma,
- bool deferred_key) {
- Handle<JSObject> builtins(isolate_->native_context()->builtins());
- Handle<JSFunction> builtin =
- Handle<JSFunction>::cast(GetProperty(builtins, "JSONSerializeAdapter"));
-
- Handle<Object> argv[] = { key, object };
- bool has_exception = false;
- Handle<Object> result =
- Execution::Call(builtin, object, 2, argv, &has_exception);
- if (has_exception) return EXCEPTION;
- if (result->IsUndefined()) return UNCHANGED;
- if (deferred_key) {
- if (key->IsSmi()) key = factory_->NumberToString(key);
- SerializeDeferredKey(deferred_comma, key);
- }
-
- Handle<String> result_string = Handle<String>::cast(result);
- // Shrink current part, attach it to the accumulator, also attach the result
- // string to the accumulator, and allocate a new part.
- ShrinkCurrentPart(); // Shrink.
- part_length_ = kInitialPartLength; // Allocate conservatively.
- Extend(); // Attach current part and allocate new part.
- // Attach result string to the accumulator.
- set_accumulator(factory_->NewConsString(accumulator(), result_string));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
- Handle<JSValue> object) {
- bool has_exception = false;
- String* class_name = object->class_name();
- if (class_name == isolate_->heap()->String_symbol()) {
- Handle<Object> value = Execution::ToString(object, &has_exception);
- if (has_exception) return EXCEPTION;
- SerializeString(Handle<String>::cast(value));
- } else if (class_name == isolate_->heap()->Number_symbol()) {
- Handle<Object> value = Execution::ToNumber(object, &has_exception);
- if (has_exception) return EXCEPTION;
- if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
- SerializeHeapNumber(Handle<HeapNumber>::cast(value));
- } else {
- ASSERT(class_name == isolate_->heap()->Boolean_symbol());
- Object* value = JSValue::cast(*object)->value();
- ASSERT(value->IsBoolean());
- Append(value->IsTrue() ? "true" : "false");
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- Append(IntToCString(object->value(), buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
- double number) {
- if (isinf(number) || isnan(number)) {
- Append("null");
- return SUCCESS;
- }
- static const int kBufferSize = 100;
- char chars[kBufferSize];
- Vector<char> buffer(chars, kBufferSize);
- Append(DoubleToCString(number, buffer));
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
- Handle<JSArray> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- int length = Smi::cast(object->length())->value();
- Append('[');
- switch (object->GetElementsKind()) {
- case FAST_SMI_ELEMENTS: {
- Handle<FixedArray> elements(
- FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- SerializeSmi(Smi::cast(elements->get(i)));
- }
- break;
- }
- case FAST_DOUBLE_ELEMENTS: {
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- SerializeDouble(elements->get_scalar(i));
- }
- break;
- }
- case FAST_ELEMENTS: {
- Handle<FixedArray> elements(
- FixedArray::cast(object->elements()), isolate_);
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- Result result =
- SerializeElement(Handle<Object>(elements->get(i), isolate_), i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- Append("null");
- } else {
- return result;
- }
- }
- break;
- }
- // TODO(yangguo): The FAST_HOLEY_* cases could be handled in a faster way.
- // They resemble the non-holey cases except that a prototype chain lookup
- // is necessary for holes.
- default: {
- Result result = SerializeJSArraySlow(object, length);
- if (result != SUCCESS) return result;
- break;
- }
- }
- Append(']');
- StackPop();
- current_part_ = handle_scope.CloseAndEscape(current_part_);
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
- Handle<JSArray> object, int length) {
- for (int i = 0; i < length; i++) {
- if (i > 0) Append(',');
- Handle<Object> element = Object::GetElement(object, i);
- if (element->IsUndefined()) {
- Append("null");
- } else {
- Result result = SerializeElement(element, i);
- if (result == SUCCESS) continue;
- if (result == UNCHANGED) {
- Append("null");
- } else {
- return result;
- }
- }
- }
- return SUCCESS;
-}
-
-
-BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
- Handle<JSObject> object) {
- HandleScope handle_scope(isolate_);
- Result stack_push = StackPush(object);
- if (stack_push != SUCCESS) return stack_push;
- if (object->IsJSGlobalProxy()) {
- object = Handle<JSObject>(
- JSObject::cast(object->GetPrototype()), isolate_);
- ASSERT(object->IsGlobalObject());
- }
-
- Append('{');
- bool comma = false;
-
- if (object->HasFastProperties() &&
- !object->HasIndexedInterceptor() &&
- !object->HasNamedInterceptor() &&
- object->elements()->length() == 0) {
- Handle<Map> map(object->map());
- for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
- Handle<String> key(map->instance_descriptors()->GetKey(i), isolate_);
- PropertyDetails details = map->instance_descriptors()->GetDetails(i);
- if (details.IsDontEnum() || details.IsDeleted()) continue;
- Handle<Object> property;
- if (details.type() == FIELD && *map == object->map()) {
- property = Handle<Object>(
- object->FastPropertyAt(
- map->instance_descriptors()->GetFieldIndex(i)),
- isolate_);
- } else {
- property = GetProperty(object, key);
- if (property.is_null()) return EXCEPTION;
- }
- Result result = SerializeProperty(property, comma, key);
- if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
- }
- } else {
- bool has_exception = false;
- Handle<FixedArray> contents =
- GetKeysInFixedArrayFor(object, LOCAL_ONLY, &has_exception);
- if (has_exception) return EXCEPTION;
-
- for (int i = 0; i < contents->length(); i++) {
- Object* key = contents->get(i);
- Handle<String> key_handle;
- Handle<Object> property;
- if (key->IsString()) {
- key_handle = Handle<String>(String::cast(key), isolate_);
- property = GetProperty(object, key_handle);
- } else {
- ASSERT(key->IsNumber());
- key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
- uint32_t index;
- if (key->IsSmi()) {
- property = Object::GetElement(object, Smi::cast(key)->value());
- } else if (key_handle->AsArrayIndex(&index)) {
- property = Object::GetElement(object, index);
- } else {
- property = GetProperty(object, key_handle);
- }
- }
- if (property.is_null()) return EXCEPTION;
- Result result = SerializeProperty(property, comma, key_handle);
- if (!comma && result == SUCCESS) comma = true;
- if (result >= EXCEPTION) return result;
- }
- }
-
- Append('}');
- StackPop();
- current_part_ = handle_scope.CloseAndEscape(current_part_);
- return SUCCESS;
-}
-
-
-void BasicJsonStringifier::ShrinkCurrentPart() {
- ASSERT(current_index_ < part_length_);
- current_part_ = Handle<String>(
- SeqString::cast(*current_part_)->Truncate(current_index_), isolate_);
-}
-
-
-void BasicJsonStringifier::Extend() {
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
- if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
- part_length_ *= kPartLengthGrowthFactor;
- }
- if (is_ascii_) {
- current_part_ = factory_->NewRawOneByteString(part_length_);
- } else {
- current_part_ = factory_->NewRawTwoByteString(part_length_);
- }
- current_index_ = 0;
-}
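Extend() grows parts geometrically: each new part doubles until the 16 KB cap, after which every part stays at kMaxPartLength, bounding both the number of allocations and the per-part waste. A tiny program reproducing the schedule:

#include <cstdio>

int main() {
  const int kInitialPartLength = 32;
  const int kMaxPartLength = 16 * 1024;
  const int kPartLengthGrowthFactor = 2;
  int part_length = kInitialPartLength;
  for (int i = 0; i < 12; i++) {
    std::printf("%d ", part_length);  // 32 64 128 ... 8192 16384 16384 16384
    if (part_length <= kMaxPartLength / kPartLengthGrowthFactor) {
      part_length *= kPartLengthGrowthFactor;
    }
  }
  std::printf("\n");
}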
-
-
-void BasicJsonStringifier::ChangeEncoding() {
- ShrinkCurrentPart();
- set_accumulator(factory_->NewConsString(accumulator(), current_part_));
- current_part_ = factory_->NewRawTwoByteString(part_length_);
- current_index_ = 0;
- is_ascii_ = false;
-}
-
-
-template <typename SrcChar, typename DestChar>
-void BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
- DestChar* dest,
- int length) {
- dest += current_index_;
- DestChar* dest_start = dest;
-
- // Assert that a uc16 character is not truncated down to 8 bits.
- // The <uc16, char> version of this method must not be called.
- ASSERT(sizeof(*dest) >= sizeof(*src));
-
- for (int i = 0; i < length; i++) {
- SrcChar c = src[i];
- if (DoNotEscape(c)) {
- *(dest++) = static_cast<DestChar>(c);
- } else {
- const char* chars = &JsonEscapeTable[c * kJsonEscapeTableEntrySize];
- while (*chars != '\0') *(dest++) = *(chars++);
- }
- }
-
- current_index_ += static_cast<int>(dest - dest_start);
-}
-
-
-template <bool is_ascii, typename Char>
-void BasicJsonStringifier::SerializeString_(Handle<String> string) {
- int length = string->length();
- Append_<is_ascii, char>('"');
- // We make a rough estimate to find out if the current string can be
- // serialized without allocating a new string part. The worst-case length of
- // an escaped character is 6. Shifting the remaining string length right by 3
- // is a more pessimistic estimate, but faster to calculate.
-
- if (((part_length_ - current_index_) >> 3) > length) {
- AssertNoAllocation no_allocation;
- Vector<const Char> vector = GetCharVector<Char>(string);
- if (is_ascii) {
- SerializeStringUnchecked_(
- vector.start(),
- SeqOneByteString::cast(*current_part_)->GetChars(),
- length);
- } else {
- SerializeStringUnchecked_(
- vector.start(),
- SeqTwoByteString::cast(*current_part_)->GetChars(),
- length);
- }
- } else {
- String* string_location = *string;
- Vector<const Char> vector = GetCharVector<Char>(string);
- for (int i = 0; i < length; i++) {
- Char c = vector[i];
- if (DoNotEscape(c)) {
- Append_<is_ascii, Char>(c);
- } else {
- Append_<is_ascii, char>(
- &JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
- }
- // If GC moved the string, we need to refresh the vector.
- if (*string != string_location) {
- vector = GetCharVector<Char>(string);
- string_location = *string;
- }
- }
- }
-
- Append_<is_ascii, char>('"');
-}
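The fast-path test above, (part_length_ - current_index_) >> 3 > length, divides remaining space by eight with a shift even though the worst escape expansion is only six characters (a \u00xx sequence), trading a little headroom for one-instruction arithmetic. A hedged check plus a worked case:

#include <cassert>

bool FitsWithoutExtend(int part_length, int current_index, int length) {
  // Conservative: requires remaining/8 > length although 6x is the true
  // worst-case expansion, so the unchecked write can never overrun.
  return ((part_length - current_index) >> 3) > length;
}

int main() {
  // With 100 chars remaining, 100 >> 3 = 12, so strings up to 11 chars take
  // the unchecked path; even if all 11 escape to 6 chars (66), they fit.
  assert(FitsWithoutExtend(100, 0, 11));
  assert(!FitsWithoutExtend(100, 0, 12));
}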
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(char c) {
- return c >= '#' && c <= '~' && c != '\\';
-}
-
-
-template <>
-bool BasicJsonStringifier::DoNotEscape(uc16 c) {
- return (c >= 0x80) || (c >= '#' && c <= '~' && c != '\\');
-}
-
-
-template <>
-Vector<const char> BasicJsonStringifier::GetCharVector(Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsAscii());
- return flat.ToAsciiVector();
-}
-
-
-template <>
-Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
- String::FlatContent flat = string->GetFlatContent();
- ASSERT(flat.IsTwoByte());
- return flat.ToUC16Vector();
-}
-
-
-void BasicJsonStringifier::SerializeString(Handle<String> object) {
- FlattenString(object);
- String::FlatContent flat = object->GetFlatContent();
- if (is_ascii_) {
- if (flat.IsAscii()) {
- SerializeString_<true, char>(object);
- } else {
- ChangeEncoding();
- SerializeString(object);
- }
- } else {
- if (flat.IsAscii()) {
- SerializeString_<false, char>(object);
- } else {
- SerializeString_<false, uc16>(object);
- }
- }
-}
-
-} } // namespace v8::internal
-
-#endif // V8_JSON_STRINGIFIER_H_
diff --git a/deps/v8/src/json.js b/deps/v8/src/json.js
index 9ab1a31e5..85224b0f0 100644
--- a/deps/v8/src/json.js
+++ b/deps/v8/src/json.js
@@ -178,9 +178,141 @@ function JSONSerialize(key, holder, replacer, stack, indent, gap) {
}
+function BasicSerializeArray(value, stack, builder) {
+ var len = value.length;
+ if (len == 0) {
+ builder.push("[]");
+ return;
+ }
+ if (!%PushIfAbsent(stack, value)) {
+ throw MakeTypeError('circular_structure', $Array());
+ }
+ builder.push("[");
+ var val = value[0];
+ if (IS_STRING(val)) {
+ // First entry is a string. Remaining entries are likely to be strings too.
+ var array_string = %QuoteJSONStringArray(value);
+ if (!IS_UNDEFINED(array_string)) {
+ // array_string also includes bracket characters so we are done.
+ builder[builder.length - 1] = array_string;
+ stack.pop();
+ return;
+ } else {
+ builder.push(%QuoteJSONString(val));
+ for (var i = 1; i < len; i++) {
+ val = value[i];
+ if (IS_STRING(val)) {
+ builder.push(%QuoteJSONStringComma(val));
+ } else {
+ builder.push(",");
+ var before = builder.length;
+ BasicJSONSerialize(i, val, stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
+ }
+ }
+ } else if (IS_NUMBER(val)) {
+ // First entry is a number. Remaining entries are likely to be numbers too.
+ builder.push(JSON_NUMBER_TO_STRING(val));
+ for (var i = 1; i < len; i++) {
+ builder.push(",");
+ val = value[i];
+ if (IS_NUMBER(val)) {
+ builder.push(JSON_NUMBER_TO_STRING(val));
+ } else {
+ var before = builder.length;
+ BasicJSONSerialize(i, val, stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
+ }
+ } else {
+ var before = builder.length;
+ BasicJSONSerialize(0, val, stack, builder);
+ if (before == builder.length) builder.push("null");
+ for (var i = 1; i < len; i++) {
+ builder.push(",");
+ before = builder.length;
+ BasicJSONSerialize(i, value[i], stack, builder);
+ if (before == builder.length) builder[before - 1] = ",null";
+ }
+ }
+ stack.pop();
+ builder.push("]");
+}
+
+
+function BasicSerializeObject(value, stack, builder) {
+ if (!%PushIfAbsent(stack, value)) {
+ throw MakeTypeError('circular_structure', $Array());
+ }
+ builder.push("{");
+ var first = true;
+ var keys = %ObjectKeys(value);
+ var len = keys.length;
+ for (var i = 0; i < len; i++) {
+ var p = keys[i];
+ if (!first) {
+ builder.push(%QuoteJSONStringComma(p));
+ } else {
+ builder.push(%QuoteJSONString(p));
+ }
+ builder.push(":");
+ var before = builder.length;
+ BasicJSONSerialize(p, value[p], stack, builder);
+ if (before == builder.length) {
+ builder.pop();
+ builder.pop();
+ } else {
+ first = false;
+ }
+ }
+ stack.pop();
+ builder.push("}");
+}
+
+
+function BasicJSONSerialize(key, value, stack, builder) {
+ if (IS_SPEC_OBJECT(value)) {
+ var toJSON = value.toJSON;
+ if (IS_SPEC_FUNCTION(toJSON)) {
+ value = %_CallFunction(value, ToString(key), toJSON);
+ }
+ }
+ if (IS_STRING(value)) {
+ builder.push(value !== "" ? %QuoteJSONString(value) : '""');
+ } else if (IS_NUMBER(value)) {
+ builder.push(JSON_NUMBER_TO_STRING(value));
+ } else if (IS_BOOLEAN(value)) {
+ builder.push(value ? "true" : "false");
+ } else if (IS_NULL(value)) {
+ builder.push("null");
+ } else if (IS_SPEC_OBJECT(value) && !(typeof value == "function")) {
+ // Value is a non-callable object.
+ // Unwrap value if necessary
+ if (IS_NUMBER_WRAPPER(value)) {
+ value = ToNumber(value);
+ builder.push(JSON_NUMBER_TO_STRING(value));
+ } else if (IS_STRING_WRAPPER(value)) {
+ builder.push(%QuoteJSONString(ToString(value)));
+ } else if (IS_BOOLEAN_WRAPPER(value)) {
+ builder.push(%_ValueOf(value) ? "true" : "false");
+ } else if (IS_ARRAY(value)) {
+ BasicSerializeArray(value, stack, builder);
+ } else {
+ BasicSerializeObject(value, stack, builder);
+ }
+ }
+}
+
+
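BasicSerializeArray and BasicSerializeObject above never concatenate strings directly; they push fragments onto a builder array that JSONStringify below joins once, avoiding quadratic intermediate strings. A C++ analogue of that builder-then-join strategy (illustrative, not V8 code):

#include <cstddef>
#include <string>
#include <vector>

std::string JoinParts(const std::vector<std::string>& parts) {
  std::size_t total = 0;
  for (const std::string& p : parts) total += p.size();
  std::string result;
  result.reserve(total);  // one allocation instead of one per fragment
  for (const std::string& p : parts) result += p;
  return result;
}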
function JSONStringify(value, replacer, space) {
if (%_ArgumentsLength() == 1) {
- return %BasicJSONStringify(value);
+ var builder = new InternalArray();
+ BasicJSONSerialize('', value, new InternalArray(), builder);
+ if (builder.length == 0) return;
+ var result = %_FastAsciiArrayJoin(builder, "");
+ if (!IS_UNDEFINED(result)) return result;
+ return %StringBuilderConcat(builder, builder.length, "");
}
if (IS_OBJECT(space)) {
// Unwrap 'space' if it is wrapped
@@ -206,7 +338,6 @@ function JSONStringify(value, replacer, space) {
return JSONSerialize('', {'': value}, replacer, new InternalArray(), "", gap);
}
-
function SetUpJSON() {
%CheckIsBootstrapping();
InstallFunctions($JSON, DONT_ENUM, $Array(
@@ -215,12 +346,4 @@ function SetUpJSON() {
));
}
-
-function JSONSerializeAdapter(key, object) {
- var holder = {};
- holder[key] = object;
- // No need to pass the actual holder since there is no replacer function.
- return JSONSerialize(key, holder, void 0, new InternalArray(), "", "");
-}
-
SetUpJSON();
diff --git a/deps/v8/src/jsregexp.cc b/deps/v8/src/jsregexp.cc
index 813208c95..e59170d5a 100644
--- a/deps/v8/src/jsregexp.cc
+++ b/deps/v8/src/jsregexp.cc
@@ -529,7 +529,7 @@ int RegExpImpl::IrregexpPrepare(Handle<JSRegExp> regexp,
if (!subject->IsFlat()) FlattenString(subject);
// Check the asciiness of the underlying storage.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
#ifdef V8_INTERPRETED_REGEXP
@@ -560,7 +560,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
ASSERT(index <= subject->length());
ASSERT(subject->IsFlat());
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
#ifndef V8_INTERPRETED_REGEXP
ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
@@ -596,7 +596,7 @@ int RegExpImpl::IrregexpExecRaw(Handle<JSRegExp> regexp,
// being internal and external, and even between being ASCII and UC16,
// but the characters are always the same).
IrregexpPrepare(regexp, subject);
- is_ascii = subject->IsOneByteRepresentationUnderneath();
+ is_ascii = subject->IsAsciiRepresentationUnderneath();
} while (true);
UNREACHABLE();
return RE_EXCEPTION;
diff --git a/deps/v8/src/lithium.h b/deps/v8/src/lithium.h
index 089926e71..b4eb2bb2d 100644
--- a/deps/v8/src/lithium.h
+++ b/deps/v8/src/lithium.h
@@ -156,8 +156,8 @@ class LUnallocated: public LOperand {
};
static const int kMaxVirtualRegisters = 1 << kVirtualRegisterWidth;
- static const int kMaxFixedIndex = (1 << (kFixedIndexWidth - 1)) - 1;
- static const int kMinFixedIndex = -(1 << (kFixedIndexWidth - 1));
+ static const int kMaxFixedIndex = (1 << kFixedIndexWidth) - 1;
+ static const int kMinFixedIndex = -(1 << kFixedIndexWidth);
bool HasAnyPolicy() const {
return policy() == ANY;
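The restored constants treat the fixed-index field as a full kFixedIndexWidth-bit magnitude, spanning [-(1 << W), (1 << W) - 1], while the 3.15 code being reverted used the two's-complement W-bit range [-(1 << (W - 1)), (1 << (W - 1)) - 1]. A quick illustration with an assumed width of 6 (not necessarily kFixedIndexWidth):

#include <cstdio>

int main() {
  const int W = 6;  // illustrative width only
  std::printf("two's complement: [%d, %d]\n",
              -(1 << (W - 1)), (1 << (W - 1)) - 1);  // [-32, 31]
  std::printf("full-width index: [%d, %d]\n",
              -(1 << W), (1 << W) - 1);              // [-64, 63]
}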
diff --git a/deps/v8/src/liveedit-debugger.js b/deps/v8/src/liveedit-debugger.js
index 451b146bd..cfcdb818c 100644
--- a/deps/v8/src/liveedit-debugger.js
+++ b/deps/v8/src/liveedit-debugger.js
@@ -76,17 +76,7 @@ Debug.LiveEdit = new function() {
try {
new_compile_info = GatherCompileInfo(new_source, script);
} catch (e) {
- var failure =
- new Failure("Failed to compile new version of script: " + e);
- if (e instanceof SyntaxError) {
- var details = {
- type: "liveedit_compile_error",
- syntaxErrorMessage: e.message
- };
- CopyErrorPositionToDetails(e, details);
- failure.details = details;
- }
- throw failure;
+ throw new Failure("Failed to compile new version of script: " + e);
}
var root_new_node = BuildCodeInfoTree(new_compile_info);
@@ -988,31 +978,6 @@ Debug.LiveEdit = new function() {
return "LiveEdit Failure: " + this.message;
};
- function CopyErrorPositionToDetails(e, details) {
- function createPositionStruct(script, position) {
- if (position == -1) return;
- var location = script.locationFromPosition(position, true);
- if (location == null) return;
- return {
- line: location.line + 1,
- column: location.column + 1,
- position: position
- };
- }
-
- if (!("scriptObject" in e) || !("startPosition" in e)) {
- return;
- }
-
- var script = e.scriptObject;
-
- var position_struct = {
- start: createPositionStruct(script, e.startPosition),
- end: createPositionStruct(script, e.endPosition)
- };
- details.position = position_struct;
- }
-
// A testing entry.
function GetPcFromSourcePos(func, source_pos) {
return %GetFunctionCodePositionFromSource(func, source_pos);
diff --git a/deps/v8/src/liveedit.cc b/deps/v8/src/liveedit.cc
index f491e3720..2a3aafc1f 100644
--- a/deps/v8/src/liveedit.cc
+++ b/deps/v8/src/liveedit.cc
@@ -36,7 +36,6 @@
#include "debug.h"
#include "deoptimizer.h"
#include "global-handles.h"
-#include "messages.h"
#include "parser.h"
#include "scopeinfo.h"
#include "scopes.h"
@@ -704,14 +703,12 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
: JSArrayBasedStruct<FunctionInfoWrapper>(array) {
}
void SetInitialProperties(Handle<String> name, int start_position,
- int end_position, int param_num,
- int literal_count, int parent_index) {
+ int end_position, int param_num, int parent_index) {
HandleScope scope;
this->SetField(kFunctionNameOffset_, name);
this->SetSmiValueField(kStartPositionOffset_, start_position);
this->SetSmiValueField(kEndPositionOffset_, end_position);
this->SetSmiValueField(kParamNumOffset_, param_num);
- this->SetSmiValueField(kLiteralNumOffset_, literal_count);
this->SetSmiValueField(kParentIndexOffset_, parent_index);
}
void SetFunctionCode(Handle<Code> function_code,
@@ -729,9 +726,6 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
Handle<JSValue> info_holder = WrapInJSValue(info);
this->SetField(kSharedFunctionInfoOffset_, info_holder);
}
- int GetLiteralCount() {
- return this->GetSmiValueField(kLiteralNumOffset_);
- }
int GetParentIndex() {
return this->GetSmiValueField(kParentIndexOffset_);
}
@@ -765,8 +759,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
static const int kOuterScopeInfoOffset_ = 6;
static const int kParentIndexOffset_ = 7;
static const int kSharedFunctionInfoOffset_ = 8;
- static const int kLiteralNumOffset_ = 9;
- static const int kSize_ = 10;
+ static const int kSize_ = 9;
friend class JSArrayBasedStruct<FunctionInfoWrapper>;
};
@@ -826,7 +819,6 @@ class FunctionInfoListener {
FunctionInfoWrapper info = FunctionInfoWrapper::Create();
info.SetInitialProperties(fun->name(), fun->start_position(),
fun->end_position(), fun->parameter_count(),
- fun->materialized_literal_count(),
current_parent_index_);
current_parent_index_ = len_;
SetElementNonStrict(result_, len_, info.GetJSArray());
@@ -926,59 +918,11 @@ JSArray* LiveEdit::GatherCompileInfo(Handle<Script> script,
Handle<Object> original_source = Handle<Object>(script->source());
script->set_source(*source);
isolate->set_active_function_info_listener(&listener);
-
- {
- // Creating a verbose TryCatch from the public API is currently the only
- // way to force code save location. We do not use this object directly.
- v8::TryCatch try_catch;
- try_catch.SetVerbose(true);
-
- // A logical 'try' section.
- CompileScriptForTracker(isolate, script);
- }
-
- // A logical 'catch' section.
- Handle<JSObject> rethrow_exception;
- if (isolate->has_pending_exception()) {
- Handle<Object> exception(isolate->pending_exception()->ToObjectChecked());
- MessageLocation message_location = isolate->GetMessageLocation();
-
- isolate->clear_pending_message();
- isolate->clear_pending_exception();
-
- // If possible, copy positions from message object to exception object.
- if (exception->IsJSObject() && !message_location.script().is_null()) {
- rethrow_exception = Handle<JSObject>::cast(exception);
-
- Factory* factory = isolate->factory();
- Handle<String> start_pos_key =
- factory->LookupAsciiSymbol("startPosition");
- Handle<String> end_pos_key =
- factory->LookupAsciiSymbol("endPosition");
- Handle<String> script_obj_key =
- factory->LookupAsciiSymbol("scriptObject");
- Handle<Smi> start_pos(Smi::FromInt(message_location.start_pos()));
- Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()));
- Handle<JSValue> script_obj = GetScriptWrapper(message_location.script());
- JSReceiver::SetProperty(
- rethrow_exception, start_pos_key, start_pos, NONE, kNonStrictMode);
- JSReceiver::SetProperty(
- rethrow_exception, end_pos_key, end_pos, NONE, kNonStrictMode);
- JSReceiver::SetProperty(
- rethrow_exception, script_obj_key, script_obj, NONE, kNonStrictMode);
- }
- }
-
- // A logical 'finally' section.
+ CompileScriptForTracker(isolate, script);
isolate->set_active_function_info_listener(NULL);
script->set_source(*original_source);
- if (rethrow_exception.is_null()) {
- return *(listener.GetResult());
- } else {
- isolate->Throw(*rethrow_exception);
- return 0;
- }
+ return *(listener.GetResult());
}
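The deleted "logical try/catch" machinery above leaned on the public v8::TryCatch in verbose mode to make V8 record a message with source positions. A hedged sketch of that pattern against the 3.x-era API in this tree; CompileSomething is a placeholder for the guarded work:

#include <v8.h>

void CompileSomething();  // placeholder for the guarded compilation

void CompileGuarded() {
  v8::HandleScope scope;       // 3.x-era scope, no isolate argument
  v8::TryCatch try_catch;
  try_catch.SetVerbose(true);  // forces the message/location to be kept

  CompileSomething();          // the logical 'try' section

  if (try_catch.HasCaught()) { // the logical 'catch' section
    v8::Handle<v8::Message> message = try_catch.Message();
    // message->GetStartPosition()/GetEndPosition() give the source span
    // that the removed code copied onto the rethrown exception object.
    (void)message;
  }
}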
@@ -1070,129 +1014,6 @@ static void ReplaceCodeObject(Handle<Code> original,
}
-// Patch function literals.
-// Name 'literals' is a misnomer. Rather it's a cache for complex object
-// boilerplates and for a native context. We must clean cached values.
-// Additionally we may need to allocate a new array if number of literals
-// changed.
-class LiteralFixer {
- public:
- static void PatchLiterals(FunctionInfoWrapper* compile_info_wrapper,
- Handle<SharedFunctionInfo> shared_info,
- Isolate* isolate) {
- int new_literal_count = compile_info_wrapper->GetLiteralCount();
- if (new_literal_count > 0) {
- new_literal_count += JSFunction::kLiteralsPrefixSize;
- }
- int old_literal_count = shared_info->num_literals();
-
- if (old_literal_count == new_literal_count) {
- // If literal count didn't change, simply go over all functions
- // and clear literal arrays.
- ClearValuesVisitor visitor;
- IterateJSFunctions(*shared_info, &visitor);
- } else {
- // When literal count changes, we have to create new array instances.
- // Since we cannot create instances when iterating heap, we should first
- // collect all functions and fix their literal arrays.
- Handle<FixedArray> function_instances =
- CollectJSFunctions(shared_info, isolate);
- for (int i = 0; i < function_instances->length(); i++) {
- Handle<JSFunction> fun(JSFunction::cast(function_instances->get(i)));
- Handle<FixedArray> old_literals(fun->literals());
- Handle<FixedArray> new_literals =
- isolate->factory()->NewFixedArray(new_literal_count);
- if (new_literal_count > 0) {
- Handle<Context> native_context;
- if (old_literals->length() >
- JSFunction::kLiteralNativeContextIndex) {
- native_context = Handle<Context>(
- JSFunction::NativeContextFromLiterals(fun->literals()));
- } else {
- native_context = Handle<Context>(fun->context()->native_context());
- }
- new_literals->set(JSFunction::kLiteralNativeContextIndex,
- *native_context);
- }
- fun->set_literals(*new_literals);
- }
-
- shared_info->set_num_literals(new_literal_count);
- }
- }
-
- private:
- // Iterates over all function instances in the heap that refer to the
- // provided shared_info.
- template<typename Visitor>
- static void IterateJSFunctions(SharedFunctionInfo* shared_info,
- Visitor* visitor) {
- AssertNoAllocation no_allocations_please;
-
- HeapIterator iterator;
- for (HeapObject* obj = iterator.next(); obj != NULL;
- obj = iterator.next()) {
- if (obj->IsJSFunction()) {
- JSFunction* function = JSFunction::cast(obj);
- if (function->shared() == shared_info) {
- visitor->visit(function);
- }
- }
- }
- }
-
- // Finds all instances of JSFunction that refer to the provided shared_info
- // and returns an array with them.
- static Handle<FixedArray> CollectJSFunctions(
- Handle<SharedFunctionInfo> shared_info, Isolate* isolate) {
- CountVisitor count_visitor;
- count_visitor.count = 0;
- IterateJSFunctions(*shared_info, &count_visitor);
- int size = count_visitor.count;
-
- Handle<FixedArray> result = isolate->factory()->NewFixedArray(size);
- if (size > 0) {
- CollectVisitor collect_visitor(result);
- IterateJSFunctions(*shared_info, &collect_visitor);
- }
- return result;
- }
-
- class ClearValuesVisitor {
- public:
- void visit(JSFunction* fun) {
- FixedArray* literals = fun->literals();
- int len = literals->length();
- for (int j = JSFunction::kLiteralsPrefixSize; j < len; j++) {
- literals->set_undefined(j);
- }
- }
- };
-
- class CountVisitor {
- public:
- void visit(JSFunction* fun) {
- count++;
- }
- int count;
- };
-
- class CollectVisitor {
- public:
- explicit CollectVisitor(Handle<FixedArray> output)
- : m_output(output), m_pos(0) {}
-
- void visit(JSFunction* fun) {
- m_output->set(m_pos, fun);
- m_pos++;
- }
- private:
- Handle<FixedArray> m_output;
- int m_pos;
- };
-};
-
-
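LiteralFixer's CountVisitor/CollectVisitor pair above exists because allocation is forbidden while iterating the heap: the first pass only counts matches, the result array is allocated in between, and the second pass fills it. A generic sketch of that two-pass pattern:

#include <cstddef>
#include <functional>
#include <vector>

template <typename T>
std::vector<const T*> CollectMatches(
    const std::vector<T>& heap,
    const std::function<bool(const T&)>& match) {
  std::size_t count = 0;
  for (const T& obj : heap) {          // pass 1: count, no allocation
    if (match(obj)) count++;
  }
  std::vector<const T*> result;
  result.reserve(count);               // the one allocation, between passes
  for (const T& obj : heap) {          // pass 2: collect
    if (match(obj)) result.push_back(&obj);
  }
  return result;
}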
// Check whether the code is natural function code (not a lazy-compile stub
// code).
static bool IsJSFunctionCode(Code* code) {
@@ -1259,10 +1080,9 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
Handle<JSArray> new_compile_info_array,
Handle<JSArray> shared_info_array) {
HandleScope scope;
- Isolate* isolate = Isolate::Current();
if (!SharedInfoWrapper::IsInstance(shared_info_array)) {
- return isolate->ThrowIllegalOperation();
+ return Isolate::Current()->ThrowIllegalOperation();
}
FunctionInfoWrapper compile_info_wrapper(new_compile_info_array);
@@ -1293,8 +1113,6 @@ MaybeObject* LiveEdit::ReplaceFunctionCode(
shared_info->set_start_position(start_position);
shared_info->set_end_position(end_position);
- LiteralFixer::PatchLiterals(&compile_info_wrapper, shared_info, isolate);
-
shared_info->set_construct_stub(
Isolate::Current()->builtins()->builtin(
Builtins::kJSConstructStubGeneric));
@@ -1469,9 +1287,7 @@ static Handle<Code> PatchPositionsInCode(
continue;
}
}
- if (RelocInfo::IsRealRelocMode(rinfo->rmode())) {
- buffer_writer.Write(it.rinfo());
- }
+ buffer_writer.Write(it.rinfo());
}
}
diff --git a/deps/v8/src/liveobjectlist.cc b/deps/v8/src/liveobjectlist.cc
index 6dbe0a86e..6b89cf683 100644
--- a/deps/v8/src/liveobjectlist.cc
+++ b/deps/v8/src/liveobjectlist.cc
@@ -71,7 +71,7 @@ typedef int (*RawComparer)(const void*, const void*);
v(ExternalAsciiString, "unexpected: ExternalAsciiString") \
v(ExternalString, "unexpected: ExternalString") \
v(SeqTwoByteString, "unexpected: SeqTwoByteString") \
- v(SeqOneByteString, "unexpected: SeqOneByteString") \
+ v(SeqAsciiString, "unexpected: SeqAsciiString") \
v(SeqString, "unexpected: SeqString") \
v(JSFunctionResultCache, "unexpected: JSFunctionResultCache") \
v(NativeContext, "unexpected: NativeContext") \
diff --git a/deps/v8/src/log-utils.cc b/deps/v8/src/log-utils.cc
index a66db3d93..7bd7baa2d 100644
--- a/deps/v8/src/log-utils.cc
+++ b/deps/v8/src/log-utils.cc
@@ -67,7 +67,6 @@ void Log::Initialize() {
FLAG_log_suspect = true;
FLAG_log_handles = true;
FLAG_log_regexp = true;
- FLAG_log_internal_timer_events = true;
}
// --prof implies --log-code.
@@ -81,8 +80,7 @@ void Log::Initialize() {
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_internal_timer_events;
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
// If we're logging anything, we need to open the log file.
if (open_log_file) {
@@ -107,9 +105,6 @@ void Log::Initialize() {
// one character so we can escape the loop properly.
p--;
break;
- case 'p':
- stream.Add("%d", OS::GetCurrentProcessId());
- break;
case 't': {
// %t expands to the current time in milliseconds.
double time = OS::TimeCurrentMillis();
@@ -262,7 +257,7 @@ void LogMessageBuilder::AppendDetailed(String* str, bool show_impl_info) {
if (len > 0x1000)
len = 0x1000;
if (show_impl_info) {
- Append(str->IsOneByteRepresentation() ? 'a' : '2');
+ Append(str->IsAsciiRepresentation() ? 'a' : '2');
if (StringShape(str).IsExternal())
Append('e');
if (StringShape(str).IsSymbol())
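
The log-utils hunks drop --log-internal-timer-events and the %p (process id) expansion from log file name templates, leaving %t, which expands to the current time in milliseconds. As a rough sketch of that kind of placeholder expansion, over a plain std::string instead of V8's stream classes (and approximating milliseconds from time()):

    #include <cstdio>
    #include <ctime>
    #include <string>

    // Expand %t in a log-name template; everything else is copied through.
    std::string ExpandLogName(const std::string& tmpl) {
      std::string out;
      for (std::size_t i = 0; i < tmpl.size(); i++) {
        if (tmpl[i] == '%' && i + 1 < tmpl.size() && tmpl[i + 1] == 't') {
          char buf[32];
          std::snprintf(buf, sizeof(buf), "%lld",
                        static_cast<long long>(std::time(nullptr)) * 1000LL);
          out += buf;
          i++;  // consume the 't'
        } else {
          out += tmpl[i];
        }
      }
      return out;
    }
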
diff --git a/deps/v8/src/log.cc b/deps/v8/src/log.cc
index d0dc76d4b..b049ffe4e 100644
--- a/deps/v8/src/log.cc
+++ b/deps/v8/src/log.cc
@@ -44,6 +44,37 @@
namespace v8 {
namespace internal {
+//
+// Sliding state window. Updates counters to keep track of the last
+// window of kBufferSize states. This is useful to track where we
+// spent our time.
+//
+class SlidingStateWindow {
+ public:
+ explicit SlidingStateWindow(Isolate* isolate);
+ ~SlidingStateWindow();
+ void AddState(StateTag state);
+
+ private:
+ static const int kBufferSize = 256;
+ Counters* counters_;
+ int current_index_;
+ bool is_full_;
+ byte buffer_[kBufferSize];
+
+
+ void IncrementStateCounter(StateTag state) {
+ counters_->state_counters(state)->Increment();
+ }
+
+
+ void DecrementStateCounter(StateTag state) {
+ counters_->state_counters(state)->Decrement();
+ }
+};
+
+
+//
// The Profiler samples pc and sp values for the main thread.
// Each sample is appended to a circular buffer.
// An independent thread removes data and writes it to the log.
@@ -158,12 +189,24 @@ class Ticker: public Sampler {
public:
Ticker(Isolate* isolate, int interval):
Sampler(isolate, interval),
+ window_(NULL),
profiler_(NULL) {}
~Ticker() { if (IsActive()) Stop(); }
virtual void Tick(TickSample* sample) {
if (profiler_) profiler_->Insert(sample);
+ if (window_) window_->AddState(sample->state);
+ }
+
+ void SetWindow(SlidingStateWindow* window) {
+ window_ = window;
+ if (!IsActive()) Start();
+ }
+
+ void ClearWindow() {
+ window_ = NULL;
+ if (!profiler_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
}
void SetProfiler(Profiler* profiler) {
@@ -176,7 +219,7 @@ class Ticker: public Sampler {
void ClearProfiler() {
DecreaseProfilingDepth();
profiler_ = NULL;
- if (IsActive()) Stop();
+ if (!window_ && IsActive() && !RuntimeProfiler::IsEnabled()) Stop();
}
protected:
@@ -185,11 +228,42 @@ class Ticker: public Sampler {
}
private:
+ SlidingStateWindow* window_;
Profiler* profiler_;
};
//
+// SlidingStateWindow implementation.
+//
+SlidingStateWindow::SlidingStateWindow(Isolate* isolate)
+ : counters_(isolate->counters()), current_index_(0), is_full_(false) {
+ for (int i = 0; i < kBufferSize; i++) {
+ buffer_[i] = static_cast<byte>(OTHER);
+ }
+ isolate->logger()->ticker_->SetWindow(this);
+}
+
+
+SlidingStateWindow::~SlidingStateWindow() {
+ LOGGER->ticker_->ClearWindow();
+}
+
+
+void SlidingStateWindow::AddState(StateTag state) {
+ if (is_full_) {
+ DecrementStateCounter(static_cast<StateTag>(buffer_[current_index_]));
+ } else if (current_index_ == kBufferSize - 1) {
+ is_full_ = true;
+ }
+ buffer_[current_index_] = static_cast<byte>(state);
+ IncrementStateCounter(state);
+ ASSERT(IsPowerOf2(kBufferSize));
+ current_index_ = (current_index_ + 1) & (kBufferSize - 1);
+}
+
+
+//
// Profiler implementation.
//
Profiler::Profiler(Isolate* isolate)
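
SlidingStateWindow, reintroduced above, keeps per-state counters over the last kBufferSize samples: once the ring buffer is full, each new sample decrements the counter of the sample it overwrites and increments its own, so the counters always describe exactly the last 256 ticks. The same bookkeeping as a self-contained sketch, with a plain counter array standing in for V8's Counters:

    #include <cstdint>

    enum StateTag { JS, GC, COMPILER, OTHER, kNumStates };

    class SlidingWindow {
     public:
      void AddState(StateTag state) {
        if (is_full_) {
          counts_[buffer_[index_]]--;   // sample falling out of the window
        } else if (index_ == kSize - 1) {
          is_full_ = true;              // buffer wraps from now on
        }
        buffer_[index_] = static_cast<uint8_t>(state);
        counts_[state]++;               // sample entering the window
        static_assert((kSize & (kSize - 1)) == 0, "kSize must be a power of 2");
        index_ = (index_ + 1) & (kSize - 1);
      }
      int count(StateTag state) const { return counts_[state]; }

     private:
      static const int kSize = 256;
      uint8_t buffer_[kSize] = {};
      int counts_[kNumStates] = {};
      int index_ = 0;
      bool is_full_ = false;
    };
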
@@ -444,6 +518,7 @@ class Logger::NameBuffer {
Logger::Logger()
: ticker_(NULL),
profiler_(NULL),
+ sliding_state_window_(NULL),
log_events_(NULL),
logging_nesting_(0),
cpu_profiler_nesting_(0),
@@ -456,8 +531,7 @@ Logger::Logger()
prev_sp_(NULL),
prev_function_(NULL),
prev_to_(NULL),
- prev_code_(NULL),
- epoch_(0) {
+ prev_code_(NULL) {
}
@@ -630,58 +704,6 @@ void Logger::SharedLibraryEvent(const wchar_t* library_path,
}
-void Logger::TimerEvent(const char* name, int64_t start, int64_t end) {
- if (!log_->IsEnabled()) return;
- ASSERT(FLAG_log_internal_timer_events);
- LogMessageBuilder msg(this);
- int since_epoch = static_cast<int>(start - epoch_);
- int pause_time = static_cast<int>(end - start);
- msg.Append("timer-event,\"%s\",%ld,%ld\n", name, since_epoch, pause_time);
- msg.WriteToLogFile();
-}
-
-
-void Logger::ExternalSwitch(StateTag old_tag, StateTag new_tag) {
- if (old_tag != EXTERNAL && new_tag == EXTERNAL) {
- enter_external_ = OS::Ticks();
- }
- if (old_tag == EXTERNAL && new_tag != EXTERNAL && enter_external_ != 0) {
- TimerEvent("V8.External", enter_external_, OS::Ticks());
- enter_external_ = 0;
- }
-}
-
-
-void Logger::EnterExternal() {
- LOGGER->enter_external_ = OS::Ticks();
-}
-
-
-void Logger::LeaveExternal() {
- if (enter_external_ == 0) return;
- Logger* logger = LOGGER;
- logger->TimerEvent("V8.External", enter_external_, OS::Ticks());
- logger->enter_external_ = 0;
-}
-
-
-int64_t Logger::enter_external_ = 0;
-
-
-void Logger::TimerEventScope::LogTimerEvent() {
- LOG(isolate_, TimerEvent(name_, start_, OS::Ticks()));
-}
-
-
-const char* Logger::TimerEventScope::v8_recompile_synchronous =
- "V8.RecompileSynchronous";
-const char* Logger::TimerEventScope::v8_recompile_parallel =
- "V8.RecompileParallel";
-const char* Logger::TimerEventScope::v8_compile_full_code =
- "V8.CompileFullCode";
-const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
-
-
void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
// Prints "/" + re.source + "/" +
// (re.global?"g":"") + (re.ignorecase?"i":"") + (re.multiline?"m":"")
@@ -852,7 +874,7 @@ void Logger::CallbackEventInternal(const char* prefix, const char* name,
Address entry_point) {
if (!log_->IsEnabled() || !FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,-3,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[CALLBACK_TAG]);
msg.AppendAddress(entry_point);
@@ -908,10 +930,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
for (const char* p = comment; *p != '\0'; p++) {
@@ -948,10 +969,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(name, false);
@@ -1001,10 +1021,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
LogMessageBuilder msg(this);
SmartArrayPointer<char> str =
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,%d,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s\",", code->ExecutableSize(), *str);
msg.AppendAddress(shared->address());
@@ -1049,10 +1068,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
SmartArrayPointer<char> sourcestr =
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
- msg.Append("%s,%s,%d,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"%s %s:%d\",",
code->ExecutableSize(),
@@ -1086,10 +1104,9 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,%d,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
- kLogEventsNames[tag],
- code->kind());
+ kLogEventsNames[tag]);
msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
msg.Append('\n');
@@ -1124,7 +1141,7 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
}
if (!FLAG_log_code) return;
LogMessageBuilder msg(this);
- msg.Append("%s,%s,-2,",
+ msg.Append("%s,%s,",
kLogEventsNames[CODE_CREATION_EVENT],
kLogEventsNames[REG_EXP_TAG]);
msg.AppendAddress(code->address());
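
Every CodeCreateEvent hunk in this file makes the same change: the 3.15 record code-creation,<tag>,<kind>,<address>,<size>,"<name>" loses its <kind> column (the -2 and -3 constants in the regexp and callback paths were apparently placeholder kinds for code with no real Code::Kind), restoring the five-field 3.14 layout. A sketch of a consumer splitting such a record — it assumes the quoted name contains no commas, which real tick-processor scripts handle more carefully:

    #include <sstream>
    #include <string>
    #include <vector>

    // "code-creation,LazyCompile,0x2b8e...,812,\"foo bar.js:7\"" -> 5 fields
    std::vector<std::string> SplitRecord(const std::string& line) {
      std::vector<std::string> fields;
      std::istringstream in(line);
      std::string field;
      while (std::getline(in, field, ',')) fields.push_back(field);
      return fields;
    }
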
@@ -1304,7 +1321,6 @@ void Logger::TickEvent(TickSample* sample, bool overflow) {
msg.AppendAddress(sample->pc);
msg.Append(',');
msg.AppendAddress(sample->sp);
- msg.Append(",%ld", static_cast<int>(OS::Ticks() - epoch_));
if (sample->has_external_callback) {
msg.Append(",1,");
msg.AppendAddress(sample->external_callback);
@@ -1337,7 +1353,9 @@ void Logger::PauseProfiler() {
if (--cpu_profiler_nesting_ == 0) {
profiler_->pause();
if (FLAG_prof_lazy) {
- ticker_->Stop();
+ if (!FLAG_sliding_state_window && !RuntimeProfiler::IsEnabled()) {
+ ticker_->Stop();
+ }
FLAG_log_code = false;
LOG(ISOLATE, UncheckedStringEvent("profiler", "pause"));
}
@@ -1358,7 +1376,9 @@ void Logger::ResumeProfiler() {
FLAG_log_code = true;
LogCompiledFunctions();
LogAccessorCallbacks();
- if (!ticker_->IsActive()) ticker_->Start();
+ if (!FLAG_sliding_state_window && !ticker_->IsActive()) {
+ ticker_->Start();
+ }
}
profiler_->resume();
}
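
PauseProfiler and ResumeProfiler balance a nesting counter (cpu_profiler_nesting_), and the restored code stops the shared ticker only when no other client — the sliding state window or the runtime profiler — still needs samples. The underlying refcount-style pattern, as a minimal sketch:

    class SampleTicker {
     public:
      void AddClient()    { if (clients_++ == 0) Start(); }
      void RemoveClient() { if (--clients_ == 0) Stop(); }
      bool IsActive() const { return clients_ > 0; }

     private:
      void Start() { /* begin periodic sampling */ }
      void Stop()  { /* end periodic sampling */ }
      int clients_ = 0;
    };

Each consumer (profiler, state window, lazy logging) adds itself once and removes itself once, so the sampler runs exactly while at least one of them is active.
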
@@ -1701,10 +1721,13 @@ bool Logger::SetUp() {
Isolate* isolate = Isolate::Current();
ticker_ = new Ticker(isolate, kSamplingIntervalMs);
+ if (FLAG_sliding_state_window && sliding_state_window_ == NULL) {
+ sliding_state_window_ = new SlidingStateWindow(isolate);
+ }
+
bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
- || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof
- || FLAG_log_internal_timer_events;
+ || FLAG_log_regexp || FLAG_log_state_changes || FLAG_ll_prof;
if (start_logging) {
logging_nesting_ = 1;
@@ -1722,8 +1745,6 @@ bool Logger::SetUp() {
}
}
- if (FLAG_log_internal_timer_events || FLAG_prof) epoch_ = OS::Ticks();
-
return true;
}
@@ -1767,6 +1788,9 @@ FILE* Logger::TearDown() {
profiler_ = NULL;
}
+ delete sliding_state_window_;
+ sliding_state_window_ = NULL;
+
delete ticker_;
ticker_ = NULL;
@@ -1774,6 +1798,22 @@ FILE* Logger::TearDown() {
}
+void Logger::EnableSlidingStateWindow() {
+ // If the ticker is NULL, Logger::SetUp has not been called yet. In
+ // that case, we set the sliding_state_window flag so that the
+ // sliding window computation will be started when Logger::SetUp is
+ // called.
+ if (ticker_ == NULL) {
+ FLAG_sliding_state_window = true;
+ return;
+ }
+ // Otherwise, if the sliding state window computation has not been
+ // started we do it now.
+ if (sliding_state_window_ == NULL) {
+ sliding_state_window_ = new SlidingStateWindow(Isolate::Current());
+ }
+}
+
// Protects the state below.
static Mutex* active_samplers_mutex = NULL;
diff --git a/deps/v8/src/log.h b/deps/v8/src/log.h
index eced6050a..33f359a7f 100644
--- a/deps/v8/src/log.h
+++ b/deps/v8/src/log.h
@@ -74,8 +74,8 @@ namespace internal {
class LogMessageBuilder;
class Profiler;
class Semaphore;
+class SlidingStateWindow;
class Ticker;
-class Isolate;
#undef LOG
#define LOG(isolate, Call) \
@@ -174,6 +174,9 @@ class Logger {
// leaving the file open.
FILE* TearDown();
+ // Enable the computation of a sliding window of states.
+ void EnableSlidingStateWindow();
+
// Emits an event with a string value -> (name, value).
void StringEvent(const char* name, const char* value);
@@ -272,37 +275,6 @@ class Logger {
uintptr_t start,
uintptr_t end);
- // ==== Events logged by --log-timer-events. ====
- void TimerEvent(const char* name, int64_t start, int64_t end);
- void ExternalSwitch(StateTag old_tag, StateTag new_tag);
-
- static void EnterExternal();
- static void LeaveExternal();
-
- class TimerEventScope {
- public:
- TimerEventScope(Isolate* isolate, const char* name)
- : isolate_(isolate), name_(name), start_(0) {
- if (FLAG_log_internal_timer_events) start_ = OS::Ticks();
- }
-
- ~TimerEventScope() {
- if (FLAG_log_internal_timer_events) LogTimerEvent();
- }
-
- void LogTimerEvent();
-
- static const char* v8_recompile_synchronous;
- static const char* v8_recompile_parallel;
- static const char* v8_compile_full_code;
- static const char* v8_execute;
-
- private:
- Isolate* isolate_;
- const char* name_;
- int64_t start_;
- };
-
// ==== Events logged by --log-regexp ====
// Regexp compilation and execution events.
@@ -429,6 +401,10 @@ class Logger {
// of samples.
Profiler* profiler_;
+ // SlidingStateWindow instance keeping a sliding window of the most
+ // recent VM states.
+ SlidingStateWindow* sliding_state_window_;
+
// An array of log events names.
const char* const* log_events_;
@@ -439,6 +415,7 @@ class Logger {
friend class LogMessageBuilder;
friend class TimeLog;
friend class Profiler;
+ friend class SlidingStateWindow;
friend class StackTracer;
friend class VMState;
@@ -472,9 +449,6 @@ class Logger {
// Logger::FunctionCreateEvent(...)
Address prev_code_;
- int64_t epoch_;
- static int64_t enter_external_;
-
friend class CpuProfiler;
};
diff --git a/deps/v8/src/macros.py b/deps/v8/src/macros.py
index f871fc55c..08fa82e68 100644
--- a/deps/v8/src/macros.py
+++ b/deps/v8/src/macros.py
@@ -32,8 +32,6 @@ const NONE = 0;
const READ_ONLY = 1;
const DONT_ENUM = 2;
const DONT_DELETE = 4;
-const NEW_ONE_BYTE_STRING = true;
-const NEW_TWO_BYTE_STRING = false;
# Constants used for getter and setter operations.
const GETTER = 0;
diff --git a/deps/v8/src/mark-compact.cc b/deps/v8/src/mark-compact.cc
index 4e6599f09..24730c6c0 100644
--- a/deps/v8/src/mark-compact.cc
+++ b/deps/v8/src/mark-compact.cc
@@ -62,7 +62,6 @@ MarkCompactCollector::MarkCompactCollector() : // NOLINT
sweep_precisely_(false),
reduce_memory_footprint_(false),
abort_incremental_marking_(false),
- marking_parity_(ODD_MARKING_PARITY),
compacting_(false),
was_marked_incrementally_(false),
tracer_(NULL),
@@ -405,13 +404,6 @@ void MarkCompactCollector::CollectGarbage() {
Finish();
- if (marking_parity_ == EVEN_MARKING_PARITY) {
- marking_parity_ = ODD_MARKING_PARITY;
- } else {
- ASSERT(marking_parity_ == ODD_MARKING_PARITY);
- marking_parity_ = EVEN_MARKING_PARITY;
- }
-
tracer_ = NULL;
}
@@ -488,7 +480,6 @@ void MarkCompactCollector::ClearMarkbits() {
MarkBit mark_bit = Marking::MarkBitFrom(obj);
mark_bit.Clear();
mark_bit.Next().Clear();
- Page::FromAddress(obj->address())->ResetProgressBar();
Page::FromAddress(obj->address())->ResetLiveBytes();
}
}
@@ -885,8 +876,8 @@ void CodeFlusher::ProcessJSFunctionCandidates() {
if (!code_mark.Get()) {
shared->set_code(lazy_compile);
candidate->set_code(lazy_compile);
- } else {
- candidate->set_code(code);
+ } else if (code == lazy_compile) {
+ candidate->set_code(lazy_compile);
}
// We are in the middle of a GC cycle so the write barrier in the code
@@ -935,107 +926,6 @@ void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
}
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
-
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- if (candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- shared_function_info_candidates_head_ = next_candidate;
- ClearNextCandidate(shared_info);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == shared_info) {
- next_candidate = GetNextCandidate(shared_info);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(shared_info);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictCandidate(JSFunction* function) {
- ASSERT(!function->next_function_link()->IsUndefined());
- Object* undefined = isolate_->heap()->undefined_value();
-
- // Make sure previous flushing decisions are revisited.
- isolate_->heap()->incremental_marking()->RecordWrites(function);
- isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- if (candidate == function) {
- next_candidate = GetNextCandidate(function);
- jsfunction_candidates_head_ = next_candidate;
- ClearNextCandidate(function, undefined);
- } else {
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
-
- if (next_candidate == function) {
- next_candidate = GetNextCandidate(function);
- SetNextCandidate(candidate, next_candidate);
- ClearNextCandidate(function, undefined);
- break;
- }
-
- candidate = next_candidate;
- }
- }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
- Object* undefined = isolate_->heap()->undefined_value();
-
- JSFunction* candidate = jsfunction_candidates_head_;
- JSFunction* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate, undefined);
- candidate = next_candidate;
- }
-
- jsfunction_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
- SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
- SharedFunctionInfo* next_candidate;
- while (candidate != NULL) {
- next_candidate = GetNextCandidate(candidate);
- ClearNextCandidate(candidate);
- candidate = next_candidate;
- }
-
- shared_function_info_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
- Heap* heap = isolate_->heap();
-
- JSFunction** slot = &jsfunction_candidates_head_;
- JSFunction* candidate = jsfunction_candidates_head_;
- while (candidate != NULL) {
- if (heap->InFromSpace(candidate)) {
- v->VisitPointer(reinterpret_cast<Object**>(slot));
- }
- candidate = GetNextCandidate(*slot);
- slot = GetNextCandidateSlot(*slot);
- }
-}
-
-
MarkCompactCollector::~MarkCompactCollector() {
if (code_flusher_ != NULL) {
delete code_flusher_;
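
The deleted EvictCandidate functions walked CodeFlusher's intrusive singly-linked candidate lists — the next pointer lives inside each candidate — and unlinked a single element, special-casing the head. The same unlink logic on a plain struct, as a sketch:

    struct Candidate {
      int id = 0;
      Candidate* next = nullptr;
    };

    // Remove 'target' from the list rooted at *head, if present.
    void Evict(Candidate** head, Candidate* target) {
      if (*head == target) {          // special case: target is the head
        *head = target->next;
        target->next = nullptr;
        return;
      }
      for (Candidate* c = *head; c != nullptr; c = c->next) {
        if (c->next == target) {      // found the predecessor
          c->next = target->next;
          target->next = nullptr;
          return;
        }
      }
    }
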
@@ -1540,13 +1430,21 @@ void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
void MarkCompactCollector::PrepareForCodeFlushing() {
ASSERT(heap() == Isolate::Current()->heap());
- // Enable code flushing for non-incremental cycles.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(!was_marked_incrementally_);
+ // TODO(1609) Currently incremental marker does not support code flushing.
+ if (!FLAG_flush_code || was_marked_incrementally_) {
+ EnableCodeFlushing(false);
+ return;
+ }
+
+#ifdef ENABLE_DEBUGGER_SUPPORT
+ if (heap()->isolate()->debug()->IsLoaded() ||
+ heap()->isolate()->debug()->has_break_points()) {
+ EnableCodeFlushing(false);
+ return;
}
+#endif
- // If code flushing is disabled, there is no need to prepare for it.
- if (!is_code_flushing_enabled()) return;
+ EnableCodeFlushing(true);
// Ensure that empty descriptor array is marked. Method MarkDescriptorArray
// relies on it being marked before any other descriptor array.
@@ -1777,16 +1675,6 @@ bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
}
-bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
- Object** p) {
- Object* o = *p;
- ASSERT(o->IsHeapObject());
- HeapObject* heap_object = HeapObject::cast(o);
- MarkBit mark = Marking::MarkBitFrom(heap_object);
- return !mark.Get();
-}
-
-
void MarkCompactCollector::MarkSymbolTable() {
SymbolTable* symbol_table = heap()->symbol_table();
// Mark the symbol table itself.
@@ -1815,6 +1703,54 @@ void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
}
+void MarkCompactCollector::MarkObjectGroups() {
+ List<ObjectGroup*>* object_groups =
+ heap()->isolate()->global_handles()->object_groups();
+
+ int last = 0;
+ for (int i = 0; i < object_groups->length(); i++) {
+ ObjectGroup* entry = object_groups->at(i);
+ ASSERT(entry != NULL);
+
+ Object*** objects = entry->objects_;
+ bool group_marked = false;
+ for (size_t j = 0; j < entry->length_; j++) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ if (mark.Get()) {
+ group_marked = true;
+ break;
+ }
+ }
+ }
+
+ if (!group_marked) {
+ (*object_groups)[last++] = entry;
+ continue;
+ }
+
+ // An object in the group is marked, so mark as grey all white heap
+ // objects in the group.
+ for (size_t j = 0; j < entry->length_; ++j) {
+ Object* object = *objects[j];
+ if (object->IsHeapObject()) {
+ HeapObject* heap_object = HeapObject::cast(object);
+ MarkBit mark = Marking::MarkBitFrom(heap_object);
+ MarkObject(heap_object, mark);
+ }
+ }
+
+ // Once the entire group has been colored grey, set the object group
+ // to NULL so it won't be processed again.
+ entry->Dispose();
+ object_groups->at(i) = NULL;
+ }
+ object_groups->Rewind(last);
+}
+
+
void MarkCompactCollector::MarkImplicitRefGroups() {
List<ImplicitRefGroup*>* ref_groups =
heap()->isolate()->global_handles()->implicit_ref_groups();
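
MarkObjectGroups, restored above, keeps the still-unmarked groups by copying them down to a write index (last) and rewinding the list, while marked groups are processed and disposed — an in-place filter that needs no second list during GC. The idiom in isolation:

    #include <cstddef>
    #include <vector>

    // Retain elements failing the predicate; process and drop the rest.
    template <typename T, typename Pred, typename Action>
    void ProcessAndCompact(std::vector<T>& items, Pred should_process,
                           Action process) {
      std::size_t last = 0;
      for (std::size_t i = 0; i < items.size(); i++) {
        if (!should_process(items[i])) {
          items[last++] = items[i];   // keep: slide down to the write index
          continue;
        }
        process(items[i]);            // consume and drop this entry
      }
      items.resize(last);             // the Rewind(last) step
    }
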
@@ -1933,12 +1869,11 @@ void MarkCompactCollector::ProcessMarkingDeque() {
}
-void MarkCompactCollector::ProcessExternalMarking(RootMarkingVisitor* visitor) {
+void MarkCompactCollector::ProcessExternalMarking() {
bool work_to_do = true;
ASSERT(marking_deque_.IsEmpty());
while (work_to_do) {
- heap()->isolate()->global_handles()->IterateObjectGroups(
- visitor, &IsUnmarkedHeapObjectWithHeap);
+ MarkObjectGroups();
MarkImplicitRefGroups();
work_to_do = !marking_deque_.IsEmpty();
ProcessMarkingDeque();
@@ -2017,7 +1952,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// The objects reachable from the roots are marked, yet unreachable
// objects are unmarked. Mark objects reachable due to host
// application specific logic.
- ProcessExternalMarking(&root_visitor);
+ ProcessExternalMarking();
// The objects reachable from the roots or object groups are marked,
// yet unreachable objects are unmarked. Mark objects reachable
@@ -2036,7 +1971,7 @@ void MarkCompactCollector::MarkLiveObjects() {
// Repeat host application specific marking to mark unmarked objects
// reachable from the weak roots.
- ProcessExternalMarking(&root_visitor);
+ ProcessExternalMarking();
AfterMarking();
}
@@ -2070,11 +2005,9 @@ void MarkCompactCollector::AfterMarking() {
// Flush code from collected candidates.
if (is_code_flushing_enabled()) {
code_flusher_->ProcessCandidates();
- // If incremental marker does not support code flushing, we need to
- // disable it before incremental marking steps for next cycle.
- if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
- EnableCodeFlushing(false);
- }
+ // TODO(1609) Currently incremental marker does not support code flushing,
+ // we need to disable it before incremental marking steps for next cycle.
+ EnableCodeFlushing(false);
}
if (!FLAG_watch_ic_patching) {
@@ -2401,16 +2334,6 @@ class PointersUpdatingVisitor: public ObjectVisitor {
}
}
- void VisitCodeAgeSequence(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- ASSERT(stub != NULL);
- VisitPointer(&stub);
- if (stub != rinfo->code_age_stub()) {
- rinfo->set_code_age_stub(Code::cast(stub));
- }
- }
-
void VisitDebugTarget(RelocInfo* rinfo) {
ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
rinfo->IsPatchedReturnSequence()) ||
@@ -3520,6 +3443,7 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
intptr_t freed_bytes = 0;
int pages_swept = 0;
+ intptr_t newspace_size = space->heap()->new_space()->Size();
bool lazy_sweeping_active = false;
bool unused_page_present = false;
@@ -3582,8 +3506,15 @@ void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
}
freed_bytes += SweepConservatively(space, p);
pages_swept++;
- space->SetPagesToSweep(p->next_page());
- lazy_sweeping_active = true;
+ if (freed_bytes > 2 * newspace_size) {
+ space->SetPagesToSweep(p->next_page());
+ lazy_sweeping_active = true;
+ } else {
+ if (FLAG_gc_verbose) {
+ PrintF("Only %" V8PRIdPTR " bytes freed. Still sweeping.\n",
+ freed_bytes);
+ }
+ }
break;
}
case PRECISE: {
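
The conservative-sweeping hunk reinstates a heuristic: sweep pages eagerly until the bytes freed exceed twice the current new-space size (enough headroom for the next scavenge), and only then switch to lazy sweeping, leaving the remaining pages for later. Schematically, with a stub in place of the real per-page sweeper:

    #include <cstdint>

    struct Page { Page* next = nullptr; };

    // Stand-in for the real sweeper: frees dead objects, returns bytes freed.
    intptr_t SweepConservatively(Page*) { return 4096; }

    // Returns the first page left for the lazy sweeper (or nullptr).
    Page* SweepUntilEnoughFreed(Page* first, intptr_t newspace_size) {
      intptr_t freed_bytes = 0;
      for (Page* p = first; p != nullptr; p = p->next) {
        freed_bytes += SweepConservatively(p);
        if (freed_bytes > 2 * newspace_size) {
          return p->next;   // sweep the rest lazily, on demand
        }
      }
      return nullptr;       // everything was swept eagerly
    }
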
@@ -3651,19 +3582,11 @@ void MarkCompactCollector::SweepSpaces() {
void MarkCompactCollector::EnableCodeFlushing(bool enable) {
-#ifdef ENABLE_DEBUGGER_SUPPORT
- if (heap()->isolate()->debug()->IsLoaded() ||
- heap()->isolate()->debug()->has_break_points()) {
- enable = false;
- }
-#endif
-
if (enable) {
if (code_flusher_ != NULL) return;
code_flusher_ = new CodeFlusher(heap()->isolate());
} else {
if (code_flusher_ == NULL) return;
- code_flusher_->EvictAllCandidates();
delete code_flusher_;
code_flusher_ = NULL;
}
diff --git a/deps/v8/src/mark-compact.h b/deps/v8/src/mark-compact.h
index 8821c3df3..7c648000d 100644
--- a/deps/v8/src/mark-compact.h
+++ b/deps/v8/src/mark-compact.h
@@ -420,45 +420,25 @@ class CodeFlusher {
shared_function_info_candidates_head_(NULL) {}
void AddCandidate(SharedFunctionInfo* shared_info) {
- if (GetNextCandidate(shared_info) == NULL) {
- SetNextCandidate(shared_info, shared_function_info_candidates_head_);
- shared_function_info_candidates_head_ = shared_info;
- }
+ SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+ shared_function_info_candidates_head_ = shared_info;
}
void AddCandidate(JSFunction* function) {
ASSERT(function->code() == function->shared()->code());
- if (GetNextCandidate(function)->IsUndefined()) {
- SetNextCandidate(function, jsfunction_candidates_head_);
- jsfunction_candidates_head_ = function;
- }
+ ASSERT(function->next_function_link()->IsUndefined());
+ SetNextCandidate(function, jsfunction_candidates_head_);
+ jsfunction_candidates_head_ = function;
}
- void EvictCandidate(SharedFunctionInfo* shared_info);
- void EvictCandidate(JSFunction* function);
-
void ProcessCandidates() {
ProcessSharedFunctionInfoCandidates();
ProcessJSFunctionCandidates();
}
- void EvictAllCandidates() {
- EvictJSFunctionCandidates();
- EvictSharedFunctionInfoCandidates();
- }
-
- void IteratePointersToFromSpace(ObjectVisitor* v);
-
private:
void ProcessJSFunctionCandidates();
void ProcessSharedFunctionInfoCandidates();
- void EvictJSFunctionCandidates();
- void EvictSharedFunctionInfoCandidates();
-
- static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
- return reinterpret_cast<JSFunction**>(
- HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
- }
static JSFunction* GetNextCandidate(JSFunction* candidate) {
Object* next_candidate = candidate->next_function_link();
@@ -659,12 +639,8 @@ class MarkCompactCollector {
void ClearMarkbits();
- bool abort_incremental_marking() const { return abort_incremental_marking_; }
-
bool is_compacting() const { return compacting_; }
- MarkingParity marking_parity() { return marking_parity_; }
-
private:
MarkCompactCollector();
~MarkCompactCollector();
@@ -697,8 +673,6 @@ class MarkCompactCollector {
bool abort_incremental_marking_;
- MarkingParity marking_parity_;
-
// True if we are collecting slots to perform evacuation from evacuation
// candidates.
bool compacting_;
@@ -757,13 +731,17 @@ class MarkCompactCollector {
// symbol table are weak.
void MarkSymbolTable();
+ // Mark objects in object groups that have at least one object in the
+ // group marked.
+ void MarkObjectGroups();
+
// Mark objects in implicit references groups if their parent object
// is marked.
void MarkImplicitRefGroups();
// Mark all objects which are reachable due to host application
// logic like object groups or implicit references' groups.
- void ProcessExternalMarking(RootMarkingVisitor* visitor);
+ void ProcessExternalMarking();
// Mark objects reachable (transitively) from objects in the marking stack
// or overflowed in the heap.
@@ -787,7 +765,6 @@ class MarkCompactCollector {
// Callback function for telling whether the object *p is an unmarked
// heap object.
static bool IsUnmarkedHeapObject(Object** p);
- static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
// Map transitions from a live map to a dead map must be killed.
// We replace them with a null descriptor, with the same key.
diff --git a/deps/v8/src/math.js b/deps/v8/src/math.js
index 46863284f..aee56af4f 100644
--- a/deps/v8/src/math.js
+++ b/deps/v8/src/math.js
@@ -131,16 +131,19 @@ function MathMax(arg1, arg2) { // length == 2
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- var r = -1/0; // Compiler constant-folds this to -Infinity.
- for (var i = 0; i < length; i++) {
+ if (length == 0) {
+ return -1/0; // Compiler constant-folds this to -Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
+ for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+ if (NUMBER_IS_NAN(n)) return n;
// Make sure +0 is considered greater than -0. -0 is never a Smi, +0 can be
// a Smi or heap number.
- if (NUMBER_IS_NAN(n) || n > r ||
- (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) {
- r = n;
- }
+ if (n > r || (r == 0 && n == 0 && !%_IsSmi(r) && 1 / r < 0)) r = n;
}
return r;
}
@@ -161,16 +164,19 @@ function MathMin(arg1, arg2) { // length == 2
// All comparisons failed, one of the arguments must be NaN.
return 0/0; // Compiler constant-folds this to NaN.
}
- var r = 1/0; // Compiler constant-folds this to Infinity.
- for (var i = 0; i < length; i++) {
+ if (length == 0) {
+ return 1/0; // Compiler constant-folds this to Infinity.
+ }
+ var r = arg1;
+ if (!IS_NUMBER(r)) r = NonNumberToNumber(r);
+ if (NUMBER_IS_NAN(r)) return r;
+ for (var i = 1; i < length; i++) {
var n = %_Arguments(i);
if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+ if (NUMBER_IS_NAN(n)) return n;
// Make sure -0 is considered less than +0. -0 is never a Smi, +0 can be a
// Smi or a heap number.
- if (NUMBER_IS_NAN(n) || n < r ||
- (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) {
- r = n;
- }
+ if (n < r || (r == 0 && n == 0 && !%_IsSmi(n) && 1 / n < 0)) r = n;
}
return r;
}
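
MathMax and MathMin change symmetrically: the restored 3.14 code seeds the accumulator with the first argument and returns as soon as any argument is NaN, where the removed 3.15 code folded the NaN test into the comparison. Either way the zeros must be ordered explicitly, since n > r is false for +0 versus -0, yet Math.max(+0, -0) must be +0 — distinguishable only through 1/x or a sign-bit test. The two-argument comparison, transcribed to C++:

    #include <cmath>

    // JS-style max of two doubles: NaN propagates, and +0 beats -0.
    double JsMax(double r, double n) {
      if (std::isnan(r)) return r;
      if (std::isnan(n)) return n;
      if (n > r || (r == 0 && n == 0 && std::signbit(r))) return n;
      return r;
    }
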
diff --git a/deps/v8/src/messages.cc b/deps/v8/src/messages.cc
index ce965fcf9..23fd4fd5d 100644
--- a/deps/v8/src/messages.cc
+++ b/deps/v8/src/messages.cc
@@ -155,9 +155,7 @@ Handle<String> MessageHandler::GetMessage(Handle<Object> data) {
JSFunction::cast(
Isolate::Current()->js_builtins_object()->
GetPropertyNoExceptionThrown(*fmt_str)));
- Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
- Handle<Object> argv[] = { Handle<Object>(message->type()),
- Handle<Object>(message->arguments()) };
+ Handle<Object> argv[] = { data };
bool caught_exception;
Handle<Object> result =
diff --git a/deps/v8/src/messages.js b/deps/v8/src/messages.js
index f0c51c61f..fe894b578 100644
--- a/deps/v8/src/messages.js
+++ b/deps/v8/src/messages.js
@@ -26,137 +26,18 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// -------------------------------------------------------------------
+//
+// If this object gets passed to an error constructor the error will
+// get an accessor for .message that constructs a descriptive error
+// message on access.
+var kAddMessageAccessorsMarker = { };
+
+// This will be lazily initialized when first needed (and forcibly
+// overwritten even though it's const).
+var kMessages = 0;
-var kMessages = {
- // Error
- cyclic_proto: ["Cyclic __proto__ value"],
- code_gen_from_strings: ["%0"],
- // TypeError
- unexpected_token: ["Unexpected token ", "%0"],
- unexpected_token_number: ["Unexpected number"],
- unexpected_token_string: ["Unexpected string"],
- unexpected_token_identifier: ["Unexpected identifier"],
- unexpected_reserved: ["Unexpected reserved word"],
- unexpected_strict_reserved: ["Unexpected strict mode reserved word"],
- unexpected_eos: ["Unexpected end of input"],
- malformed_regexp: ["Invalid regular expression: /", "%0", "/: ", "%1"],
- unterminated_regexp: ["Invalid regular expression: missing /"],
- regexp_flags: ["Cannot supply flags when constructing one RegExp from another"],
- incompatible_method_receiver: ["Method ", "%0", " called on incompatible receiver ", "%1"],
- invalid_lhs_in_assignment: ["Invalid left-hand side in assignment"],
- invalid_lhs_in_for_in: ["Invalid left-hand side in for-in"],
- invalid_lhs_in_postfix_op: ["Invalid left-hand side expression in postfix operation"],
- invalid_lhs_in_prefix_op: ["Invalid left-hand side expression in prefix operation"],
- multiple_defaults_in_switch: ["More than one default clause in switch statement"],
- newline_after_throw: ["Illegal newline after throw"],
- redeclaration: ["%0", " '", "%1", "' has already been declared"],
- no_catch_or_finally: ["Missing catch or finally after try"],
- unknown_label: ["Undefined label '", "%0", "'"],
- uncaught_exception: ["Uncaught ", "%0"],
- stack_trace: ["Stack Trace:\n", "%0"],
- called_non_callable: ["%0", " is not a function"],
- undefined_method: ["Object ", "%1", " has no method '", "%0", "'"],
- property_not_function: ["Property '", "%0", "' of object ", "%1", " is not a function"],
- cannot_convert_to_primitive: ["Cannot convert object to primitive value"],
- not_constructor: ["%0", " is not a constructor"],
- not_defined: ["%0", " is not defined"],
- non_object_property_load: ["Cannot read property '", "%0", "' of ", "%1"],
- non_object_property_store: ["Cannot set property '", "%0", "' of ", "%1"],
- non_object_property_call: ["Cannot call method '", "%0", "' of ", "%1"],
- with_expression: ["%0", " has no properties"],
- illegal_invocation: ["Illegal invocation"],
- no_setter_in_callback: ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
- apply_non_function: ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
- apply_wrong_args: ["Function.prototype.apply: Arguments list has wrong type"],
- invalid_in_operator_use: ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
- instanceof_function_expected: ["Expecting a function in instanceof check, but got ", "%0"],
- instanceof_nonobject_proto: ["Function has non-object prototype '", "%0", "' in instanceof check"],
- null_to_object: ["Cannot convert null to object"],
- reduce_no_initial: ["Reduce of empty array with no initial value"],
- getter_must_be_callable: ["Getter must be a function: ", "%0"],
- setter_must_be_callable: ["Setter must be a function: ", "%0"],
- value_and_accessor: ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
- proto_object_or_null: ["Object prototype may only be an Object or null"],
- property_desc_object: ["Property description must be an object: ", "%0"],
- redefine_disallowed: ["Cannot redefine property: ", "%0"],
- define_disallowed: ["Cannot define property:", "%0", ", object is not extensible."],
- non_extensible_proto: ["%0", " is not extensible"],
- handler_non_object: ["Proxy.", "%0", " called with non-object as handler"],
- proto_non_object: ["Proxy.", "%0", " called with non-object as prototype"],
- trap_function_expected: ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
- handler_trap_missing: ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
- handler_trap_must_be_callable: ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
- handler_returned_false: ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
- handler_returned_undefined: ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
- proxy_prop_not_configurable: ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
- proxy_non_object_prop_names: ["Trap '", "%1", "' returned non-object ", "%0"],
- proxy_repeated_prop_name: ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
- invalid_weakmap_key: ["Invalid value used as weak map key"],
- not_date_object: ["this is not a Date object."],
- observe_non_object: ["Object.", "%0", " cannot ", "%0", " non-object"],
- observe_non_function: ["Object.", "%0", " cannot deliver to non-function"],
- observe_callback_frozen: ["Object.observe cannot deliver to a frozen function object"],
- observe_type_non_string: ["Invalid changeRecord with non-string 'type' property"],
- observe_notify_non_notifier: ["notify called on non-notifier object"],
- // RangeError
- invalid_array_length: ["Invalid array length"],
- stack_overflow: ["Maximum call stack size exceeded"],
- invalid_time_value: ["Invalid time value"],
- // SyntaxError
- unable_to_parse: ["Parse error"],
- invalid_regexp_flags: ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
- invalid_regexp: ["Invalid RegExp pattern /", "%0", "/"],
- illegal_break: ["Illegal break statement"],
- illegal_continue: ["Illegal continue statement"],
- illegal_return: ["Illegal return statement"],
- illegal_let: ["Illegal let declaration outside extended mode"],
- error_loading_debugger: ["Error loading debugger"],
- no_input_to_regexp: ["No input to ", "%0"],
- invalid_json: ["String '", "%0", "' is not valid JSON"],
- circular_structure: ["Converting circular structure to JSON"],
- called_on_non_object: ["%0", " called on non-object"],
- called_on_null_or_undefined: ["%0", " called on null or undefined"],
- array_indexof_not_defined: ["Array.getIndexOf: Argument undefined"],
- object_not_extensible: ["Can't add property ", "%0", ", object is not extensible"],
- illegal_access: ["Illegal access"],
- invalid_preparser_data: ["Invalid preparser data for function ", "%0"],
- strict_mode_with: ["Strict mode code may not include a with statement"],
- strict_catch_variable: ["Catch variable may not be eval or arguments in strict mode"],
- too_many_arguments: ["Too many arguments in function call (only 32766 allowed)"],
- too_many_parameters: ["Too many parameters in function definition (only 32766 allowed)"],
- too_many_variables: ["Too many variables declared (only 131071 allowed)"],
- strict_param_name: ["Parameter name eval or arguments is not allowed in strict mode"],
- strict_param_dupe: ["Strict mode function may not have duplicate parameter names"],
- strict_var_name: ["Variable name may not be eval or arguments in strict mode"],
- strict_function_name: ["Function name may not be eval or arguments in strict mode"],
- strict_octal_literal: ["Octal literals are not allowed in strict mode."],
- strict_duplicate_property: ["Duplicate data property in object literal not allowed in strict mode"],
- accessor_data_property: ["Object literal may not have data and accessor property with the same name"],
- accessor_get_set: ["Object literal may not have multiple get/set accessors with the same name"],
- strict_lhs_assignment: ["Assignment to eval or arguments is not allowed in strict mode"],
- strict_lhs_postfix: ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_lhs_prefix: ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
- strict_reserved_word: ["Use of future reserved word in strict mode"],
- strict_delete: ["Delete of an unqualified identifier in strict mode."],
- strict_delete_property: ["Cannot delete property '", "%0", "' of ", "%1"],
- strict_const: ["Use of const in strict mode."],
- strict_function: ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
- strict_read_only_property: ["Cannot assign to read only property '", "%0", "' of ", "%1"],
- strict_cannot_assign: ["Cannot assign to read only '", "%0", "' in strict mode"],
- strict_poison_pill: ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
- strict_caller: ["Illegal access to a strict mode caller function."],
- unprotected_let: ["Illegal let declaration in unprotected statement context."],
- unprotected_const: ["Illegal const declaration in unprotected statement context."],
- cant_prevent_ext_external_array_elements: ["Cannot prevent extension of an object with external array elements"],
- redef_external_array_element: ["Cannot redefine a property of an object with external array elements"],
- harmony_const_assign: ["Assignment to constant variable."],
- invalid_module_path: ["Module does not export '", "%0", "', or export is not itself a module"],
- module_type_error: ["Module '", "%0", "' used improperly"],
- module_export_undefined: ["Export '", "%0", "' is not defined in module"],
-};
-
-
-function FormatString(format, args) {
+function FormatString(format, message) {
+ var args = %MessageGetArguments(message);
var result = "";
var arg_num = 0;
for (var i = 0; i < format.length; i++) {
@@ -167,7 +48,7 @@ function FormatString(format, args) {
if (arg_num < 4) {
// str is one of %0, %1, %2 or %3.
try {
- str = NoSideEffectToString(args[arg_num]);
+ str = ToDetailString(args[arg_num]);
} catch (e) {
if (%IsJSModule(args[arg_num]))
str = "module";
@@ -184,27 +65,6 @@ function FormatString(format, args) {
}
-function NoSideEffectToString(obj) {
- if (IS_STRING(obj)) return obj;
- if (IS_NUMBER(obj)) return %_NumberToString(obj);
- if (IS_BOOLEAN(obj)) return obj ? 'true' : 'false';
- if (IS_UNDEFINED(obj)) return 'undefined';
- if (IS_NULL(obj)) return 'null';
- if (IS_FUNCTION(obj)) return %_CallFunction(obj, FunctionToString);
- if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
- var constructor = obj.constructor;
- if (typeof constructor == "function") {
- var constructorName = constructor.name;
- if (IS_STRING(constructorName) && constructorName !== "") {
- return "#<" + constructorName + ">";
- }
- }
- }
- if (IsNativeErrorObject(obj)) return %_CallFunction(obj, ErrorToString);
- return %_CallFunction(obj, ObjectToString);
-}
-
-
// To check if something is a native error we need to check the
// concrete native error types. It is not sufficient to use instanceof
// since it is possible to create an object that has Error.prototype on
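
FormatString now pulls the argument list back out of the message object and substitutes the %0–%3 placeholders, stringifying each argument with ToDetailString and falling back to a generic description if that throws. A sketch of just the substitution loop, over flat C++ strings rather than V8's format arrays:

    #include <cstddef>
    #include <string>
    #include <vector>

    // Expand "%0".."%3" from args; all other text is copied through.
    std::string FormatString(const std::string& format,
                             const std::vector<std::string>& args) {
      std::string result;
      for (std::size_t i = 0; i < format.size(); i++) {
        char c = format[i];
        if (c == '%' && i + 1 < format.size() &&
            format[i + 1] >= '0' && format[i + 1] <= '3') {
          std::size_t arg_num = format[i + 1] - '0';
          result += arg_num < args.size() ? args[arg_num]
                                          : std::string("#<undefined>");
          i++;  // consume the digit
        } else {
          result += c;
        }
      }
      return result;
    }
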
@@ -252,8 +112,13 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) {
- if (IS_UNDEFINED(args)) args = [];
- return new constructor(FormatMessage(type, args));
+ if (IS_UNDEFINED(args)) {
+ args = [];
+ }
+ var e = new constructor(kAddMessageAccessorsMarker);
+ e.type = type;
+ e.arguments = args;
+ return e;
}
@@ -270,10 +135,156 @@ function MakeGenericError(constructor, type, args) {
// Helper functions; called from the runtime system.
-function FormatMessage(type, args) {
- var format = kMessages[type];
- if (!format) return "<unknown message " + type + ">";
- return FormatString(format, args);
+function FormatMessage(message) {
+ if (kMessages === 0) {
+ var messagesDictionary = [
+ // Error
+ "cyclic_proto", ["Cyclic __proto__ value"],
+ "code_gen_from_strings", ["%0"],
+ // TypeError
+ "unexpected_token", ["Unexpected token ", "%0"],
+ "unexpected_token_number", ["Unexpected number"],
+ "unexpected_token_string", ["Unexpected string"],
+ "unexpected_token_identifier", ["Unexpected identifier"],
+ "unexpected_reserved", ["Unexpected reserved word"],
+ "unexpected_strict_reserved", ["Unexpected strict mode reserved word"],
+ "unexpected_eos", ["Unexpected end of input"],
+ "malformed_regexp", ["Invalid regular expression: /", "%0", "/: ", "%1"],
+ "unterminated_regexp", ["Invalid regular expression: missing /"],
+ "regexp_flags", ["Cannot supply flags when constructing one RegExp from another"],
+ "incompatible_method_receiver", ["Method ", "%0", " called on incompatible receiver ", "%1"],
+ "invalid_lhs_in_assignment", ["Invalid left-hand side in assignment"],
+ "invalid_lhs_in_for_in", ["Invalid left-hand side in for-in"],
+ "invalid_lhs_in_postfix_op", ["Invalid left-hand side expression in postfix operation"],
+ "invalid_lhs_in_prefix_op", ["Invalid left-hand side expression in prefix operation"],
+ "multiple_defaults_in_switch", ["More than one default clause in switch statement"],
+ "newline_after_throw", ["Illegal newline after throw"],
+ "redeclaration", ["%0", " '", "%1", "' has already been declared"],
+ "no_catch_or_finally", ["Missing catch or finally after try"],
+ "unknown_label", ["Undefined label '", "%0", "'"],
+ "uncaught_exception", ["Uncaught ", "%0"],
+ "stack_trace", ["Stack Trace:\n", "%0"],
+ "called_non_callable", ["%0", " is not a function"],
+ "undefined_method", ["Object ", "%1", " has no method '", "%0", "'"],
+ "property_not_function", ["Property '", "%0", "' of object ", "%1", " is not a function"],
+ "cannot_convert_to_primitive", ["Cannot convert object to primitive value"],
+ "not_constructor", ["%0", " is not a constructor"],
+ "not_defined", ["%0", " is not defined"],
+ "non_object_property_load", ["Cannot read property '", "%0", "' of ", "%1"],
+ "non_object_property_store", ["Cannot set property '", "%0", "' of ", "%1"],
+ "non_object_property_call", ["Cannot call method '", "%0", "' of ", "%1"],
+ "with_expression", ["%0", " has no properties"],
+ "illegal_invocation", ["Illegal invocation"],
+ "no_setter_in_callback", ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
+ "apply_non_function", ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
+ "apply_wrong_args", ["Function.prototype.apply: Arguments list has wrong type"],
+ "invalid_in_operator_use", ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
+ "instanceof_function_expected", ["Expecting a function in instanceof check, but got ", "%0"],
+ "instanceof_nonobject_proto", ["Function has non-object prototype '", "%0", "' in instanceof check"],
+ "null_to_object", ["Cannot convert null to object"],
+ "reduce_no_initial", ["Reduce of empty array with no initial value"],
+ "getter_must_be_callable", ["Getter must be a function: ", "%0"],
+ "setter_must_be_callable", ["Setter must be a function: ", "%0"],
+ "value_and_accessor", ["Invalid property. A property cannot both have accessors and be writable or have a value, ", "%0"],
+ "proto_object_or_null", ["Object prototype may only be an Object or null"],
+ "property_desc_object", ["Property description must be an object: ", "%0"],
+ "redefine_disallowed", ["Cannot redefine property: ", "%0"],
+ "define_disallowed", ["Cannot define property:", "%0", ", object is not extensible."],
+ "non_extensible_proto", ["%0", " is not extensible"],
+ "handler_non_object", ["Proxy.", "%0", " called with non-object as handler"],
+ "proto_non_object", ["Proxy.", "%0", " called with non-object as prototype"],
+ "trap_function_expected", ["Proxy.", "%0", " called with non-function for '", "%1", "' trap"],
+ "handler_trap_missing", ["Proxy handler ", "%0", " has no '", "%1", "' trap"],
+ "handler_trap_must_be_callable", ["Proxy handler ", "%0", " has non-callable '", "%1", "' trap"],
+ "handler_returned_false", ["Proxy handler ", "%0", " returned false from '", "%1", "' trap"],
+ "handler_returned_undefined", ["Proxy handler ", "%0", " returned undefined from '", "%1", "' trap"],
+ "proxy_prop_not_configurable", ["Proxy handler ", "%0", " returned non-configurable descriptor for property '", "%2", "' from '", "%1", "' trap"],
+ "proxy_non_object_prop_names", ["Trap '", "%1", "' returned non-object ", "%0"],
+ "proxy_repeated_prop_name", ["Trap '", "%1", "' returned repeated property name '", "%2", "'"],
+ "invalid_weakmap_key", ["Invalid value used as weak map key"],
+ "not_date_object", ["this is not a Date object."],
+ // RangeError
+ "invalid_array_length", ["Invalid array length"],
+ "stack_overflow", ["Maximum call stack size exceeded"],
+ "invalid_time_value", ["Invalid time value"],
+ // SyntaxError
+ "unable_to_parse", ["Parse error"],
+ "invalid_regexp_flags", ["Invalid flags supplied to RegExp constructor '", "%0", "'"],
+ "invalid_regexp", ["Invalid RegExp pattern /", "%0", "/"],
+ "illegal_break", ["Illegal break statement"],
+ "illegal_continue", ["Illegal continue statement"],
+ "illegal_return", ["Illegal return statement"],
+ "illegal_let", ["Illegal let declaration outside extended mode"],
+ "error_loading_debugger", ["Error loading debugger"],
+ "no_input_to_regexp", ["No input to ", "%0"],
+ "invalid_json", ["String '", "%0", "' is not valid JSON"],
+ "circular_structure", ["Converting circular structure to JSON"],
+ "called_on_non_object", ["%0", " called on non-object"],
+ "called_on_null_or_undefined", ["%0", " called on null or undefined"],
+ "array_indexof_not_defined", ["Array.getIndexOf: Argument undefined"],
+ "object_not_extensible", ["Can't add property ", "%0", ", object is not extensible"],
+ "illegal_access", ["Illegal access"],
+ "invalid_preparser_data", ["Invalid preparser data for function ", "%0"],
+ "strict_mode_with", ["Strict mode code may not include a with statement"],
+ "strict_catch_variable", ["Catch variable may not be eval or arguments in strict mode"],
+ "too_many_arguments", ["Too many arguments in function call (only 32766 allowed)"],
+ "too_many_parameters", ["Too many parameters in function definition (only 32766 allowed)"],
+ "too_many_variables", ["Too many variables declared (only 131071 allowed)"],
+ "strict_param_name", ["Parameter name eval or arguments is not allowed in strict mode"],
+ "strict_param_dupe", ["Strict mode function may not have duplicate parameter names"],
+ "strict_var_name", ["Variable name may not be eval or arguments in strict mode"],
+ "strict_function_name", ["Function name may not be eval or arguments in strict mode"],
+ "strict_octal_literal", ["Octal literals are not allowed in strict mode."],
+ "strict_duplicate_property", ["Duplicate data property in object literal not allowed in strict mode"],
+ "accessor_data_property", ["Object literal may not have data and accessor property with the same name"],
+ "accessor_get_set", ["Object literal may not have multiple get/set accessors with the same name"],
+ "strict_lhs_assignment", ["Assignment to eval or arguments is not allowed in strict mode"],
+ "strict_lhs_postfix", ["Postfix increment/decrement may not have eval or arguments operand in strict mode"],
+ "strict_lhs_prefix", ["Prefix increment/decrement may not have eval or arguments operand in strict mode"],
+ "strict_reserved_word", ["Use of future reserved word in strict mode"],
+ "strict_delete", ["Delete of an unqualified identifier in strict mode."],
+ "strict_delete_property", ["Cannot delete property '", "%0", "' of ", "%1"],
+ "strict_const", ["Use of const in strict mode."],
+ "strict_function", ["In strict mode code, functions can only be declared at top level or immediately within another function." ],
+ "strict_read_only_property", ["Cannot assign to read only property '", "%0", "' of ", "%1"],
+ "strict_cannot_assign", ["Cannot assign to read only '", "%0", "' in strict mode"],
+ "strict_poison_pill", ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
+ "strict_caller", ["Illegal access to a strict mode caller function."],
+ "unprotected_let", ["Illegal let declaration in unprotected statement context."],
+ "unprotected_const", ["Illegal const declaration in unprotected statement context."],
+ "cant_prevent_ext_external_array_elements", ["Cannot prevent extension of an object with external array elements"],
+ "redef_external_array_element", ["Cannot redefine a property of an object with external array elements"],
+ "harmony_const_assign", ["Assignment to constant variable."],
+ "invalid_module_path", ["Module does not export '", "%0", "', or export is not itself a module"],
+ "module_type_error", ["Module '", "%0", "' used improperly"],
+ "module_export_undefined", ["Export '", "%0", "' is not defined in module"],
+ ];
+ var messages = { __proto__ : null };
+ for (var i = 0; i < messagesDictionary.length; i += 2) {
+ var key = messagesDictionary[i];
+ var format = messagesDictionary[i + 1];
+
+ for (var j = 0; j < format.length; j++) {
+ %IgnoreAttributesAndSetProperty(format, %_NumberToString(j), format[j],
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+ }
+ %IgnoreAttributesAndSetProperty(format, 'length', format.length,
+ DONT_DELETE | READ_ONLY | DONT_ENUM);
+ %PreventExtensions(format);
+ %IgnoreAttributesAndSetProperty(messages,
+ key,
+ format,
+ DONT_DELETE | DONT_ENUM | READ_ONLY);
+ }
+ %PreventExtensions(messages);
+ %IgnoreAttributesAndSetProperty(builtins, "kMessages",
+ messages,
+ DONT_DELETE | DONT_ENUM | READ_ONLY);
+ }
+ var message_type = %MessageGetType(message);
+ var format = kMessages[message_type];
+ if (!format) return "<unknown message " + message_type + ">";
+ return FormatString(format, message);
}
@@ -751,6 +762,29 @@ function GetStackTraceLine(recv, fun, pos, isGlobal) {
// ----------------------------------------------------------------------------
// Error implementation
+// Defines accessors for a property that is calculated the first time
+// the property is read.
+function DefineOneShotAccessor(obj, name, fun) {
+ // Note that the accessors consistently operate on 'obj', not 'this'.
+ // Since the object may occur in someone else's prototype chain we
+ // can't rely on 'this' being the same as 'obj'.
+ var value;
+ var value_factory = fun;
+ var getter = function() {
+ if (value_factory == null) {
+ return value;
+ }
+ value = value_factory(obj);
+ value_factory = null;
+ return value;
+ };
+ var setter = function(v) {
+ value_factory = null;
+ value = v;
+ };
+ %DefineOrRedefineAccessorProperty(obj, name, getter, setter, DONT_ENUM);
+}
+
function CallSite(receiver, fun, pos) {
this.receiver = receiver;
this.fun = fun;
@@ -1085,22 +1119,9 @@ function captureStackTrace(obj, cons_opt) {
var raw_stack = %CollectStackTrace(obj,
cons_opt ? cons_opt : captureStackTrace,
stackTraceLimit);
- // Note that 'obj' and 'this' maybe different when called on objects that
- // have the error object on its prototype chain. The getter replaces itself
- // with a data property as soon as the stack trace has been formatted.
- var getter = function() {
- var value = FormatRawStackTrace(obj, raw_stack);
- raw_stack = void 0;
- %DefineOrRedefineDataProperty(obj, 'stack', value, NONE);
- return value;
- };
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
- var setter = function(v) {
- %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- };
-
- %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
+ DefineOneShotAccessor(obj, 'stack', function (obj) {
+ return FormatRawStackTrace(obj, raw_stack);
+ });
}
@@ -1139,7 +1160,15 @@ function SetUpError() {
// object. This avoids going through getters and setters defined
// on prototype objects.
%IgnoreAttributesAndSetProperty(this, 'stack', void 0, DONT_ENUM);
- if (!IS_UNDEFINED(m)) {
+ %IgnoreAttributesAndSetProperty(this, 'arguments', void 0, DONT_ENUM);
+ %IgnoreAttributesAndSetProperty(this, 'type', void 0, DONT_ENUM);
+ if (m === kAddMessageAccessorsMarker) {
+ // DefineOneShotAccessor always inserts a message property and
+ // ignores setters.
+ DefineOneShotAccessor(this, 'message', function (obj) {
+ return FormatMessage(%NewMessageObject(obj.type, obj.arguments));
+ });
+ } else if (!IS_UNDEFINED(m)) {
%IgnoreAttributesAndSetProperty(
this, 'message', ToString(m), DONT_ENUM);
}
@@ -1198,9 +1227,15 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
function ErrorToStringDetectCycle(error) {
if (!%PushIfAbsent(visited_errors, error)) throw cyclic_error_marker;
try {
+ var type = GetPropertyWithoutInvokingMonkeyGetters(error, "type");
var name = GetPropertyWithoutInvokingMonkeyGetters(error, "name");
name = IS_UNDEFINED(name) ? "Error" : TO_STRING_INLINE(name);
var message = GetPropertyWithoutInvokingMonkeyGetters(error, "message");
+ var hasMessage = %_CallFunction(error, "message", ObjectHasOwnProperty);
+ if (type && !hasMessage) {
+ var args = GetPropertyWithoutInvokingMonkeyGetters(error, "arguments");
+ message = FormatMessage(%NewMessageObject(type, args));
+ }
message = IS_UNDEFINED(message) ? "" : TO_STRING_INLINE(message);
if (name === "") return message;
if (message === "") return name;
@@ -1232,37 +1267,4 @@ InstallFunctions($Error.prototype, DONT_ENUM, ['toString', ErrorToString]);
// Boilerplate for exceptions for stack overflows. Used from
// Isolate::StackOverflow().
-function SetUpStackOverflowBoilerplate() {
- var boilerplate = MakeRangeError('stack_overflow', []);
-
- // The raw stack trace is stored as hidden property of the copy of this
- // boilerplate error object. Note that the receiver 'this' may not be that
- // error object copy, but can be found on the prototype chain of 'this'.
- // When the stack trace is formatted, this accessor property is replaced by
- // a data property.
- function getter() {
- var holder = this;
- while (!IS_ERROR(holder)) {
- holder = %GetPrototype(holder);
- if (holder == null) return MakeSyntaxError('illegal_access', []);
- }
- var raw_stack = %GetOverflowedRawStackTrace(holder);
- var result = IS_ARRAY(raw_stack) ? FormatRawStackTrace(holder, raw_stack)
- : void 0;
- %DefineOrRedefineDataProperty(holder, 'stack', result, NONE);
- return result;
- }
-
- // The 'stack' property of the receiver is set as data property. If
- // the receiver is the same as holder, this accessor pair is replaced.
- function setter(v) {
- %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
- }
-
- %DefineOrRedefineAccessorProperty(
- boilerplate, 'stack', getter, setter, DONT_ENUM);
-
- return boilerplate;
-}
-
-var kStackOverflowBoilerplate = SetUpStackOverflowBoilerplate();
+var kStackOverflowBoilerplate = MakeRangeError('stack_overflow', []);
diff --git a/deps/v8/src/mips/assembler-mips-inl.h b/deps/v8/src/mips/assembler-mips-inl.h
index caf544f7c..3e726a754 100644
--- a/deps/v8/src/mips/assembler-mips-inl.h
+++ b/deps/v8/src/mips/assembler-mips-inl.h
@@ -1,4 +1,3 @@
-
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
@@ -232,24 +231,6 @@ void RelocInfo::set_target_cell(JSGlobalPropertyCell* cell,
}
-static const int kNoCodeAgeSequenceLength = 7;
-
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- return Code::GetCodeFromTargetAddress(
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Memory::Address_at(pc_ + Assembler::kInstrSize *
- (kNoCodeAgeSequenceLength - 1)) =
- stub->instruction_start();
-}
-
-
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -321,8 +302,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
visitor->VisitGlobalPropertyCell(this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -349,8 +328,6 @@ void RelocInfo::Visit(Heap* heap) {
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/deps/v8/src/mips/assembler-mips.cc b/deps/v8/src/mips/assembler-mips.cc
index e7506206c..a4563a64f 100644
--- a/deps/v8/src/mips/assembler-mips.cc
+++ b/deps/v8/src/mips/assembler-mips.cc
@@ -267,11 +267,45 @@ const Instr kLwSwInstrArgumentMask = ~kLwSwInstrTypeMask;
const Instr kLwSwOffsetMask = kImm16Mask;
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
recorded_ast_id_(TypeFeedbackId::None()),
- positions_recorder_(this) {
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
+ // Set up buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
last_trampoline_pool_end_ = 0;
no_trampoline_pool_before_ = 0;
@@ -290,6 +324,18 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
}
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
void Assembler::GetCode(CodeDesc* desc) {
ASSERT(pc_ <= reloc_info_writer.pos()); // No overlap.
// Set up code descriptor.
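// For reference: the constructor/destructor pair restored above implements a
// one-slot buffer cache -- the isolate keeps at most one spare minimal-size
// buffer so that short-lived assemblers avoid a heap allocation each time.
// An illustrative stand-alone C++ sketch of the same idea (not the V8 API):
#include <cstddef>
#include <cstdint>

class SpareBufferCache {
 public:
  static const size_t kMinimalBufferSize = 4 * 1024;

  SpareBufferCache() : spare_(NULL) {}
  ~SpareBufferCache() { delete[] spare_; }

  // Reuse the cached buffer when available, otherwise allocate a fresh one.
  uint8_t* Acquire() {
    if (spare_ != NULL) {
      uint8_t* buffer = spare_;
      spare_ = NULL;
      return buffer;
    }
    return new uint8_t[kMinimalBufferSize];
  }

  // Keep exactly one buffer around for reuse; free any surplus.
  void Release(uint8_t* buffer) {
    if (spare_ == NULL) {
      spare_ = buffer;
    } else {
      delete[] buffer;
    }
  }

 private:
  uint8_t* spare_;
};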
diff --git a/deps/v8/src/mips/assembler-mips.h b/deps/v8/src/mips/assembler-mips.h
index 75d110a36..59c45c927 100644
--- a/deps/v8/src/mips/assembler-mips.h
+++ b/deps/v8/src/mips/assembler-mips.h
@@ -523,7 +523,13 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ ~Assembler();
+
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ // Dummy for cross platform compatibility.
+ void set_predictable_code_size(bool value) { }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -663,9 +669,7 @@ class Assembler : public AssemblerBase {
PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
// Helper values.
LAST_CODE_MARKER,
- FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
- // Code aging
- CODE_AGE_MARKER_NOP = 6
+ FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
};
// Type == 0 is the default non-marking nop. For mips this is a
@@ -943,6 +947,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ int32_t pc_offset() const { return pc_ - buffer_; }
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Postpone the generation of the trampoline pool for the specified number of
@@ -1027,6 +1033,8 @@ class Assembler : public AssemblerBase {
// the relocation info.
TypeFeedbackId recorded_ast_id_;
+ bool emit_debug_code() const { return emit_debug_code_; }
+
int32_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Decode branch instruction at pos and return branch target pos.
@@ -1085,6 +1093,13 @@ class Assembler : public AssemblerBase {
}
private:
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes.
static const int kBufferCheckInterval = 1*KB/2;
@@ -1095,6 +1110,7 @@ class Assembler : public AssemblerBase {
// not have to check for overflow. The same is true for writes of large
// relocation info entries.
static const int kGap = 32;
+ byte* pc_; // The program counter - moves forward.
// Repeated checking whether the trampoline pool should be emitted is rather
@@ -1269,6 +1285,7 @@ class Assembler : public AssemblerBase {
friend class BlockTrampolinePoolScope;
PositionsRecorder positions_recorder_;
+ bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};
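// For reference: pc_ and reloc_info_writer share the single buffer declared
// above -- code grows upward from the start while relocation info grows
// downward from the end, so buffer_space() is simply the gap between the two
// write positions. An illustrative sketch of that invariant (not V8 code):
#include <cassert>
#include <cstddef>
#include <cstdint>

struct CodeBuffer {
  uint8_t* pc;         // Next code byte is written here (grows up).
  uint8_t* reloc_pos;  // Next reloc byte is written below here (grows down).

  ptrdiff_t FreeSpace() const { return reloc_pos - pc; }

  void EmitCodeByte(uint8_t b) {
    assert(FreeSpace() > 0);  // Caller must grow the buffer first.
    *pc++ = b;
  }

  void EmitRelocByte(uint8_t b) {
    assert(FreeSpace() > 0);
    *--reloc_pos = b;
  }
};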
diff --git a/deps/v8/src/mips/builtins-mips.cc b/deps/v8/src/mips/builtins-mips.cc
index b2348fca2..0342e6505 100644
--- a/deps/v8/src/mips/builtins-mips.cc
+++ b/deps/v8/src/mips/builtins-mips.cc
@@ -1255,48 +1255,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- __ mov(a0, ra);
- // Adjust a0 to point to the head of the PlatformCodeAge sequence
- __ Subu(a0, a0,
- Operand((kNoCodeAgeSequenceLength - 1) * Assembler::kInstrSize));
- // Restore the original return address of the function
- __ mov(ra, at);
-
- // The following registers must be saved and restored when calling through to
- // the runtime:
- // a0 - contains return address (beginning of patch sequence)
- // a1 - function object
- RegList saved_regs =
- (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
- FrameScope scope(masm, StackFrame::MANUAL);
- __ MultiPush(saved_regs);
- __ PrepareCallCFunction(1, 0, a1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- __ MultiPop(saved_regs);
- __ Jump(a0);
-}
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
{
diff --git a/deps/v8/src/mips/code-stubs-mips.cc b/deps/v8/src/mips/code-stubs-mips.cc
index a05cee86b..ca3182645 100644
--- a/deps/v8/src/mips/code-stubs-mips.cc
+++ b/deps/v8/src/mips/code-stubs-mips.cc
@@ -42,7 +42,8 @@ namespace internal {
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc);
+ Condition cc,
+ bool never_nan_nan);
static void EmitSmiNonsmiComparison(MacroAssembler* masm,
Register lhs,
Register rhs,
@@ -626,6 +627,24 @@ void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
}
+void FloatingPointHelper::LoadOperands(
+ MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* slow) {
+
+  // Load right operand (a0) to f14 or a2/a3.
+ LoadNumber(masm, destination,
+ a0, f14, a2, a3, heap_number_map, scratch1, scratch2, slow);
+
+  // Load left operand (a1) to f12 or a0/a1.
+ LoadNumber(masm, destination,
+ a1, f12, a0, a1, heap_number_map, scratch1, scratch2, slow);
+}
+
+
void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
Destination destination,
Register object,
@@ -734,13 +753,13 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
FPURegister double_dst,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register scratch2,
FPURegister single_scratch) {
ASSERT(!int_scratch.is(scratch2));
- ASSERT(!int_scratch.is(dst_mantissa));
- ASSERT(!int_scratch.is(dst_exponent));
+ ASSERT(!int_scratch.is(dst1));
+ ASSERT(!int_scratch.is(dst2));
Label done;
@@ -749,65 +768,64 @@ void FloatingPointHelper::ConvertIntToDouble(MacroAssembler* masm,
__ mtc1(int_scratch, single_scratch);
__ cvt_d_w(double_dst, single_scratch);
if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
+ __ Move(dst1, dst2, double_dst);
}
} else {
Label fewer_than_20_useful_bits;
// Expected output:
- // | dst_exponent | dst_mantissa |
+ // | dst2 | dst1 |
// | s | exp | mantissa |
// Check for zero.
- __ mov(dst_exponent, int_scratch);
- __ mov(dst_mantissa, int_scratch);
+ __ mov(dst2, int_scratch);
+ __ mov(dst1, int_scratch);
__ Branch(&done, eq, int_scratch, Operand(zero_reg));
// Preload the sign of the value.
- __ And(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask));
+ __ And(dst2, int_scratch, Operand(HeapNumber::kSignMask));
// Get the absolute value of the object (as an unsigned integer).
Label skip_sub;
- __ Branch(&skip_sub, ge, dst_exponent, Operand(zero_reg));
+ __ Branch(&skip_sub, ge, dst2, Operand(zero_reg));
__ Subu(int_scratch, zero_reg, int_scratch);
__ bind(&skip_sub);
// Get mantissa[51:20].
// Get the position of the first set bit.
- __ Clz(dst_mantissa, int_scratch);
+ __ Clz(dst1, int_scratch);
__ li(scratch2, 31);
- __ Subu(dst_mantissa, scratch2, dst_mantissa);
+ __ Subu(dst1, scratch2, dst1);
// Set the exponent.
- __ Addu(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias));
- __ Ins(dst_exponent, scratch2,
+ __ Addu(scratch2, dst1, Operand(HeapNumber::kExponentBias));
+ __ Ins(dst2, scratch2,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Clear the first non null bit.
__ li(scratch2, Operand(1));
- __ sllv(scratch2, scratch2, dst_mantissa);
+ __ sllv(scratch2, scratch2, dst1);
__ li(at, -1);
__ Xor(scratch2, scratch2, at);
__ And(int_scratch, int_scratch, scratch2);
// Get the number of bits to set in the lower part of the mantissa.
- __ Subu(scratch2, dst_mantissa,
- Operand(HeapNumber::kMantissaBitsInTopWord));
+ __ Subu(scratch2, dst1, Operand(HeapNumber::kMantissaBitsInTopWord));
__ Branch(&fewer_than_20_useful_bits, lt, scratch2, Operand(zero_reg));
// Set the higher 20 bits of the mantissa.
__ srlv(at, int_scratch, scratch2);
- __ or_(dst_exponent, dst_exponent, at);
+ __ or_(dst2, dst2, at);
__ li(at, 32);
__ subu(scratch2, at, scratch2);
- __ sllv(dst_mantissa, int_scratch, scratch2);
+ __ sllv(dst1, int_scratch, scratch2);
__ Branch(&done);
__ bind(&fewer_than_20_useful_bits);
__ li(at, HeapNumber::kMantissaBitsInTopWord);
- __ subu(scratch2, at, dst_mantissa);
+ __ subu(scratch2, at, dst1);
__ sllv(scratch2, int_scratch, scratch2);
- __ Or(dst_exponent, dst_exponent, scratch2);
- // Set dst_mantissa to 0.
- __ mov(dst_mantissa, zero_reg);
+ __ Or(dst2, dst2, scratch2);
+ // Set dst1 to 0.
+ __ mov(dst1, zero_reg);
}
__ bind(&done);
}
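// For reference: the core-register path above assembles the IEEE-754 bit
// pattern by hand -- sign bit, exponent derived from the position of the
// highest set bit (Clz), then the remaining bits left-aligned into the
// 52-bit mantissa. The same computation as an illustrative C++ sketch
// (__builtin_clz assumes GCC/Clang; this is not the V8 code):
#include <cstdint>

uint64_t Int32ToDoubleBits(int32_t value) {
  if (value == 0) return 0;  // +0.0 is the all-zero bit pattern.
  uint64_t sign = value < 0 ? (1ULL << 63) : 0;
  uint32_t magnitude = value < 0 ? -static_cast<uint32_t>(value)
                                 : static_cast<uint32_t>(value);
  int top_bit = 31 - __builtin_clz(magnitude);  // Unbiased exponent.
  uint64_t exponent = static_cast<uint64_t>(top_bit + 1023) << 52;
  // Drop the implicit leading one; left-align the rest in the mantissa.
  uint64_t mantissa =
      (static_cast<uint64_t>(magnitude) << (52 - top_bit)) &
      ((1ULL << 52) - 1);
  return sign | exponent | mantissa;
}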
@@ -817,9 +835,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DoubleRegister double_dst,
- DoubleRegister double_scratch,
- Register dst_mantissa,
- Register dst_exponent,
+ Register dst1,
+ Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
@@ -835,8 +852,8 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ JumpIfNotSmi(object, &obj_is_not_smi);
__ SmiUntag(scratch1, object);
- ConvertIntToDouble(masm, scratch1, destination, double_dst, dst_mantissa,
- dst_exponent, scratch2, single_scratch);
+ ConvertIntToDouble(masm, scratch1, destination, double_dst, dst1, dst2,
+ scratch2, single_scratch);
__ Branch(&done);
__ bind(&obj_is_not_smi);
@@ -853,10 +870,9 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- scratch1,
+ single_scratch,
double_dst,
- at,
- double_scratch,
+ scratch1,
except_flag,
kCheckForInexactConversion);
@@ -864,51 +880,27 @@ void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm,
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
if (destination == kCoreRegisters) {
- __ Move(dst_mantissa, dst_exponent, double_dst);
+ __ Move(dst1, dst2, double_dst);
}
} else {
ASSERT(!scratch1.is(object) && !scratch2.is(object));
// Load the double value in the destination registers.
- bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent);
- if (save_registers) {
- // Save both output registers, because the other one probably holds
- // an important value too.
- __ Push(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
// Check for 0 and -0.
- Label zero;
- __ And(scratch1, dst_exponent, Operand(~HeapNumber::kSignMask));
- __ Or(scratch1, scratch1, Operand(dst_mantissa));
- __ Branch(&zero, eq, scratch1, Operand(zero_reg));
+ __ And(scratch1, dst1, Operand(~HeapNumber::kSignMask));
+ __ Or(scratch1, scratch1, Operand(dst2));
+ __ Branch(&done, eq, scratch1, Operand(zero_reg));
// Check that the value can be exactly represented by a 32-bit integer.
// Jump to not_int32 if that's not the case.
- Label restore_input_and_miss;
- DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2,
- &restore_input_and_miss);
-
- // dst_* were trashed. Reload the double value.
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ lw(dst_exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
- __ lw(dst_mantissa, FieldMemOperand(object, HeapNumber::kMantissaOffset));
- __ Branch(&done);
-
- __ bind(&restore_input_and_miss);
- if (save_registers) {
- __ Pop(dst_exponent, dst_mantissa);
- }
- __ Branch(not_int32);
+ DoubleIs32BitInteger(masm, dst1, dst2, scratch1, scratch2, not_int32);
- __ bind(&zero);
- if (save_registers) {
- __ Drop(2);
- }
+ // dst1 and dst2 were trashed. Reload the double value.
+ __ lw(dst2, FieldMemOperand(object, HeapNumber::kExponentOffset));
+ __ lw(dst1, FieldMemOperand(object, HeapNumber::kMantissaOffset));
}
__ bind(&done);
@@ -922,8 +914,7 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
Register scratch1,
Register scratch2,
Register scratch3,
- DoubleRegister double_scratch0,
- DoubleRegister double_scratch1,
+ DoubleRegister double_scratch,
Label* not_int32) {
ASSERT(!dst.is(object));
ASSERT(!scratch1.is(object) && !scratch2.is(object) && !scratch3.is(object));
@@ -931,34 +922,36 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
!scratch1.is(scratch3) &&
!scratch2.is(scratch3));
- Label done, maybe_undefined;
+ Label done;
__ UntagAndJumpIfSmi(dst, object, &done);
__ AssertRootValue(heap_number_map,
Heap::kHeapNumberMapRootIndex,
"HeapNumberMap register clobbered.");
-
- __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined);
+ __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32);
// Object is a heap number.
// Convert the floating point value to a 32-bit integer.
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
// Load the double value.
- __ ldc1(double_scratch0, FieldMemOperand(object, HeapNumber::kValueOffset));
+ __ ldc1(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+ FPURegister single_scratch = double_scratch.low();
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- dst,
- double_scratch0,
+ single_scratch,
+ double_scratch,
scratch1,
- double_scratch1,
except_flag,
kCheckForInexactConversion);
// Jump to not_int32 if the operation did not succeed.
__ Branch(not_int32, ne, except_flag, Operand(zero_reg));
+ // Get the result in the destination register.
+ __ mfc1(dst, single_scratch);
+
} else {
// Load the double value in the destination registers.
__ lw(scratch2, FieldMemOperand(object, HeapNumber::kExponentOffset));
@@ -990,28 +983,20 @@ void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm,
__ Subu(dst, zero_reg, dst);
__ bind(&skip_sub);
}
- __ Branch(&done);
-
- __ bind(&maybe_undefined);
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
- __ Branch(not_int32, ne, object, Operand(at));
- // |undefined| is truncated to 0.
- __ li(dst, Operand(Smi::FromInt(0)));
- // Fall through.
__ bind(&done);
}
void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
- Register src_exponent,
- Register src_mantissa,
+ Register src1,
+ Register src2,
Register dst,
Register scratch,
Label* not_int32) {
// Get exponent alone in scratch.
__ Ext(scratch,
- src_exponent,
+ src1,
HeapNumber::kExponentShift,
HeapNumber::kExponentBits);
@@ -1031,11 +1016,11 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Another way to put it is that if (exponent - signbit) > 30 then the
// number cannot be represented as an int32.
Register tmp = dst;
- __ srl(at, src_exponent, 31);
+ __ srl(at, src1, 31);
__ subu(tmp, scratch, at);
__ Branch(not_int32, gt, tmp, Operand(30));
// - Bits [21:0] in the mantissa are not null.
- __ And(tmp, src_mantissa, 0x3fffff);
+ __ And(tmp, src2, 0x3fffff);
__ Branch(not_int32, ne, tmp, Operand(zero_reg));
// Otherwise the exponent needs to be big enough to shift left all the
@@ -1046,20 +1031,20 @@ void FloatingPointHelper::DoubleIs32BitInteger(MacroAssembler* masm,
// Get the 32 higher bits of the mantissa in dst.
__ Ext(dst,
- src_mantissa,
+ src2,
HeapNumber::kMantissaBitsInTopWord,
32 - HeapNumber::kMantissaBitsInTopWord);
- __ sll(at, src_exponent, HeapNumber::kNonMantissaBitsInTopWord);
+ __ sll(at, src1, HeapNumber::kNonMantissaBitsInTopWord);
__ or_(dst, dst, at);
// Create the mask and test the lower bits (of the higher bits).
__ li(at, 32);
__ subu(scratch, at, scratch);
- __ li(src_mantissa, 1);
- __ sllv(src_exponent, src_mantissa, scratch);
- __ Subu(src_exponent, src_exponent, Operand(1));
- __ And(src_exponent, dst, src_exponent);
- __ Branch(not_int32, ne, src_exponent, Operand(zero_reg));
+ __ li(src2, 1);
+ __ sllv(src1, src2, scratch);
+ __ Subu(src1, src1, Operand(1));
+ __ And(src1, dst, src1);
+ __ Branch(not_int32, ne, src1, Operand(zero_reg));
}
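// For reference: DoubleIs32BitInteger answers the question "does this double
// hold an exact int32 value?" by inspecting the exponent and mantissa bits.
// A portable illustrative sketch of the same predicate (here -0 is rejected
// inline, whereas the stub above handles zero in a separate check):
#include <cmath>

bool IsExactInt32(double d) {
  if (!(d >= -2147483648.0 && d <= 2147483647.0)) return false;  // NaN/range.
  if (d != std::trunc(d)) return false;  // Fractional bits present.
  if (d == 0.0 && std::signbit(d)) return false;  // Reject -0.
  return true;
}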
@@ -1198,43 +1183,48 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
// for "identity and not NaN".
static void EmitIdenticalObjectComparison(MacroAssembler* masm,
Label* slow,
- Condition cc) {
+ Condition cc,
+ bool never_nan_nan) {
Label not_identical;
Label heap_number, return_equal;
Register exp_mask_reg = t5;
__ Branch(&not_identical, ne, a0, Operand(a1));
- __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
-
- // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
- // so we do the second best thing - test it ourselves.
- // They are both equal and they are not both Smis so both of them are not
- // Smis. If it's not a heap number, then return equal.
- if (cc == less || cc == greater) {
- __ GetObjectType(a0, t4, t4);
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- } else {
- __ GetObjectType(a0, t4, t4);
- __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
- // Comparing JS objects with <=, >= is complicated.
- if (cc != eq) {
- __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
- // Normally here we fall through to return_equal, but undefined is
- // special: (undefined == undefined) == true, but
- // (undefined <= undefined) == false! See ECMAScript 11.8.5.
- if (cc == less_equal || cc == greater_equal) {
- __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
- __ Branch(&return_equal, ne, a0, Operand(t2));
- if (cc == le) {
- // undefined <= undefined should fail.
- __ li(v0, Operand(GREATER));
- } else {
- // undefined >= undefined should fail.
- __ li(v0, Operand(LESS));
+ // The two objects are identical. If we know that one of them isn't NaN then
+ // we now know they test equal.
+ if (cc != eq || !never_nan_nan) {
+ __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+ // Test for NaN. Sadly, we can't just compare to factory->nan_value(),
+ // so we do the second best thing - test it ourselves.
+ // They are both equal and they are not both Smis so both of them are not
+ // Smis. If it's not a heap number, then return equal.
+ if (cc == less || cc == greater) {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ } else {
+ __ GetObjectType(a0, t4, t4);
+ __ Branch(&heap_number, eq, t4, Operand(HEAP_NUMBER_TYPE));
+ // Comparing JS objects with <=, >= is complicated.
+ if (cc != eq) {
+ __ Branch(slow, greater, t4, Operand(FIRST_SPEC_OBJECT_TYPE));
+ // Normally here we fall through to return_equal, but undefined is
+ // special: (undefined == undefined) == true, but
+ // (undefined <= undefined) == false! See ECMAScript 11.8.5.
+ if (cc == less_equal || cc == greater_equal) {
+ __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
+ __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+ __ Branch(&return_equal, ne, a0, Operand(t2));
+ if (cc == le) {
+ // undefined <= undefined should fail.
+ __ li(v0, Operand(GREATER));
+ } else {
+ // undefined >= undefined should fail.
+ __ li(v0, Operand(LESS));
+ }
+ __ Ret();
}
- __ Ret();
}
}
}
@@ -1250,44 +1240,46 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
}
__ Ret();
- // For less and greater we don't have to check for NaN since the result of
- // x < x is false regardless. For the others here is some code to check
- // for NaN.
- if (cc != lt && cc != gt) {
- __ bind(&heap_number);
- // It is a heap number, so return non-equal if it's NaN and equal if it's
- // not NaN.
-
- // The representation of NaN values has all exponent bits (52..62) set,
- // and not all mantissa bits (0..51) clear.
- // Read top bits of double representation (second word of value).
- __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
- // Test that exponent bits are all set.
- __ And(t3, t2, Operand(exp_mask_reg));
- // If all bits not set (ne cond), then not a NaN, objects are equal.
- __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
-
- // Shift out flag and all exponent bits, retaining only mantissa.
- __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
- // Or with all low-bits of mantissa.
- __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
- __ Or(v0, t3, Operand(t2));
- // For equal we already have the right value in v0: Return zero (equal)
- // if all bits in mantissa are zero (it's an Infinity) and non-zero if
- // not (it's a NaN). For <= and >= we need to load v0 with the failing
- // value if it's a NaN.
- if (cc != eq) {
- // All-zero means Infinity means equal.
- __ Ret(eq, v0, Operand(zero_reg));
- if (cc == le) {
- __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
- } else {
- __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ if (cc != eq || !never_nan_nan) {
+    // For less and greater we don't have to check for NaN since the result of
+    // x < x is false regardless. For the other conditions we still need to
+    // check for NaN.
+ if (cc != lt && cc != gt) {
+ __ bind(&heap_number);
+ // It is a heap number, so return non-equal if it's NaN and equal if it's
+ // not NaN.
+
+ // The representation of NaN values has all exponent bits (52..62) set,
+ // and not all mantissa bits (0..51) clear.
+ // Read top bits of double representation (second word of value).
+ __ lw(t2, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ // Test that exponent bits are all set.
+ __ And(t3, t2, Operand(exp_mask_reg));
+ // If all bits not set (ne cond), then not a NaN, objects are equal.
+ __ Branch(&return_equal, ne, t3, Operand(exp_mask_reg));
+
+ // Shift out flag and all exponent bits, retaining only mantissa.
+ __ sll(t2, t2, HeapNumber::kNonMantissaBitsInTopWord);
+ // Or with all low-bits of mantissa.
+ __ lw(t3, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Or(v0, t3, Operand(t2));
+ // For equal we already have the right value in v0: Return zero (equal)
+ // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+ // not (it's a NaN). For <= and >= we need to load v0 with the failing
+ // value if it's a NaN.
+ if (cc != eq) {
+ // All-zero means Infinity means equal.
+ __ Ret(eq, v0, Operand(zero_reg));
+ if (cc == le) {
+ __ li(v0, Operand(GREATER)); // NaN <= NaN should fail.
+ } else {
+ __ li(v0, Operand(LESS)); // NaN >= NaN should fail.
+ }
}
+ __ Ret();
}
- __ Ret();
+ // No fall through here.
}
- // No fall through here.
__ bind(&not_identical);
}
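// For reference: the heap-number path above detects NaN straight from the
// IEEE-754 layout -- all eleven exponent bits set plus a non-zero mantissa
// (an all-zero mantissa would be an Infinity instead). The same test as an
// illustrative C++ sketch (not the V8 code):
#include <cstdint>
#include <cstring>

bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));  // Bit-exact view of the double.
  const uint64_t kExponentMask = 0x7FF0000000000000ULL;  // Bits 62..52.
  const uint64_t kMantissaMask = 0x000FFFFFFFFFFFFFULL;  // Bits 51..0.
  return (bits & kExponentMask) == kExponentMask &&
         (bits & kMantissaMask) != 0;
}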
@@ -1760,61 +1752,43 @@ void NumberToStringStub::Generate(MacroAssembler* masm) {
}
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
- Register input,
- Register scratch,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::HEAP_NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
- DONT_DO_SMI_CHECK);
- }
- // We could be strict about symbol/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
-
-// On entry a1 and a2 are the values to be compared.
-// On exit a0 is 0, positive or negative to indicate the result of
-// the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
- Register lhs = a1;
- Register rhs = a0;
- Condition cc = GetCondition();
-
- Label miss;
- ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
- ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
-
+// On entry lhs_ (lhs) and rhs_ (rhs) are the things to be compared.
+// On exit, v0 is 0, positive, or negative (smi) to indicate the result
+// of the comparison.
+void CompareStub::Generate(MacroAssembler* masm) {
Label slow; // Call builtin.
Label not_smis, both_loaded_as_doubles;
- Label not_two_smis, smi_done;
- __ Or(a2, a1, a0);
- __ JumpIfNotSmi(a2, &not_two_smis);
- __ sra(a1, a1, 1);
- __ sra(a0, a0, 1);
- __ Ret(USE_DELAY_SLOT);
- __ subu(v0, a1, a0);
- __ bind(&not_two_smis);
+
+ if (include_smi_compare_) {
+ Label not_two_smis, smi_done;
+ __ Or(a2, a1, a0);
+ __ JumpIfNotSmi(a2, &not_two_smis);
+ __ sra(a1, a1, 1);
+ __ sra(a0, a0, 1);
+ __ Ret(USE_DELAY_SLOT);
+ __ subu(v0, a1, a0);
+ __ bind(&not_two_smis);
+ } else if (FLAG_debug_code) {
+ __ Or(a2, a1, a0);
+ __ And(a2, a2, kSmiTagMask);
+ __ Assert(ne, "CompareStub: unexpected smi operands.",
+ a2, Operand(zero_reg));
+ }
+
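+  // For reference: on 32-bit V8 a smi is the integer shifted left one bit
+  // (tag bit zero), so the fast path above compares two smis by shifting the
+  // tag away and subtracting; the 31-bit operands make the subtraction
+  // overflow-free. Illustrative C++ sketch (not the V8 code):
+  //
+  //   const int kSmiTagSize = 1;
+  //
+  //   int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }
+  //
+  //   // Negative, zero, or positive, matching v0 in the generated code.
+  //   int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
+  //     return SmiUntag(lhs_tagged) - SmiUntag(rhs_tagged);
+  //   }
+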
// NOTICE! This code is only reached after a smi-fast-case check, so
// it is certain that at least one operand isn't a smi.
// Handle the case where the objects are identical. Either returns the answer
// or goes to slow. Only falls through if the objects were not identical.
- EmitIdenticalObjectComparison(masm, &slow, cc);
+ EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
// If either is a Smi (we know that not both are), then they can only
// be strictly equal if the other is a HeapNumber.
STATIC_ASSERT(kSmiTag == 0);
ASSERT_EQ(0, Smi::FromInt(0));
- __ And(t2, lhs, Operand(rhs));
+ __ And(t2, lhs_, Operand(rhs_));
__ JumpIfNotSmi(t2, &not_smis, t0);
// One operand is a smi. EmitSmiNonsmiComparison generates code that can:
// 1) Return the answer.
@@ -1824,8 +1798,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// In cases 3 and 4 we have found out we were dealing with a number-number
// comparison and the numbers have been loaded into f12 and f14 as doubles,
// or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
- EmitSmiNonsmiComparison(masm, lhs, rhs,
- &both_loaded_as_doubles, &slow, strict());
+ EmitSmiNonsmiComparison(masm, lhs_, rhs_,
+ &both_loaded_as_doubles, &slow, strict_);
__ bind(&both_loaded_as_doubles);
// f12, f14 are the double representations of the left hand side
@@ -1861,7 +1835,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&nan);
// NaN comparisons always fail.
// Load whatever we need in v0 to make the comparison fail.
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
__ li(v0, Operand(GREATER));
} else {
__ li(v0, Operand(LESS));
@@ -1870,20 +1844,20 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
} else {
// Checks for NaN in the doubles we have loaded. Can return the answer or
// fall through if neither is a NaN. Also binds rhs_not_nan.
- EmitNanCheck(masm, cc);
+ EmitNanCheck(masm, cc_);
// Compares two doubles that are not NaNs. Returns the answer.
// Never falls through.
- EmitTwoNonNanDoubleComparison(masm, cc);
+ EmitTwoNonNanDoubleComparison(masm, cc_);
}
__ bind(&not_smis);
// At this point we know we are dealing with two different objects,
// and neither of them is a Smi. The objects are in lhs_ and rhs_.
- if (strict()) {
+ if (strict_) {
// This returns non-equal for some object types, or falls through if it
// was not lucky.
- EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+ EmitStrictTwoHeapObjectCompare(masm, lhs_, rhs_);
}
Label check_for_symbols;
@@ -1893,38 +1867,38 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// that case. If the inputs are not doubles then jumps to check_for_symbols.
// In this case a2 will contain the type of lhs_.
EmitCheckForTwoHeapNumbers(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
&both_loaded_as_doubles,
&check_for_symbols,
&flat_string_check);
__ bind(&check_for_symbols);
- if (cc == eq && !strict()) {
+ if (cc_ == eq && !strict_) {
// Returns an answer for two symbols or two detectable objects.
// Otherwise jumps to string case or not both strings case.
// Assumes that a2 is the type of lhs_ on entry.
- EmitCheckForSymbolsOrObjects(masm, lhs, rhs, &flat_string_check, &slow);
+ EmitCheckForSymbolsOrObjects(masm, lhs_, rhs_, &flat_string_check, &slow);
}
// Check for both being sequential ASCII strings, and inline if that is the
// case.
__ bind(&flat_string_check);
- __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+ __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs_, rhs_, a2, a3, &slow);
__ IncrementCounter(isolate->counters()->string_compare_native(), 1, a2, a3);
- if (cc == eq) {
+ if (cc_ == eq) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
a2,
a3,
t0);
} else {
StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
- lhs,
- rhs,
+ lhs_,
+ rhs_,
a2,
a3,
t0,
@@ -1935,18 +1909,18 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ bind(&slow);
// Prepare for call to builtin. Push object pointers, a0 (lhs) first,
// a1 (rhs) second.
- __ Push(lhs, rhs);
+ __ Push(lhs_, rhs_);
// Figure out which native to call and setup the arguments.
Builtins::JavaScript native;
- if (cc == eq) {
- native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc_ == eq) {
+ native = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
native = Builtins::COMPARE;
int ncr; // NaN compare result.
- if (cc == lt || cc == le) {
+ if (cc_ == lt || cc_ == le) {
ncr = GREATER;
} else {
- ASSERT(cc == gt || cc == ge); // Remaining cases.
+ ASSERT(cc_ == gt || cc_ == ge); // Remaining cases.
ncr = LESS;
}
__ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1956,9 +1930,6 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(native, JUMP_FUNCTION);
-
- __ bind(&miss);
- GenerateMiss(masm);
}
@@ -2399,23 +2370,20 @@ void UnaryOpStub::GenerateGenericCodeFallback(
}
-void BinaryOpStub::Initialize() {
- platform_specific_bit_ = CpuFeatures::IsSupported(FPU);
-}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
Label get_result;
__ Push(a1, a0);
__ li(a2, Operand(Smi::FromInt(MinorKey())));
- __ push(a2);
+ __ li(a1, Operand(Smi::FromInt(op_)));
+ __ li(a0, Operand(Smi::FromInt(operands_type_)));
+ __ Push(a2, a1, a0);
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
@@ -2426,8 +2394,59 @@ void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
}
-void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
- Token::Value op) {
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ GenerateInt32Stub(masm);
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+}
+
+
+
+void BinaryOpStub::GenerateSmiSmiOperation(MacroAssembler* masm) {
Register left = a1;
Register right = a0;
@@ -2438,7 +2457,7 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
STATIC_ASSERT(kSmiTag == 0);
Label not_smi_result;
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ AdduAndCheckForOverflow(v0, left, right, scratch1);
__ RetOnNoOverflow(scratch1);
@@ -2581,24 +2600,10 @@ void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode);
-
-
-void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
- BinaryOpIC::TypeInfo left_type,
- BinaryOpIC::TypeInfo right_type,
- bool smi_operands,
- Label* not_numbers,
- Label* gc_required,
- Label* miss,
- Token::Value op,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required) {
Register left = a1;
Register right = a0;
Register scratch1 = t3;
@@ -2610,17 +2615,11 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AssertSmi(left);
__ AssertSmi(right);
}
- if (left_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, miss);
- }
- if (right_type == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, miss);
- }
Register heap_number_map = t2;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- switch (op) {
+ switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
@@ -2630,44 +2629,25 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// depending on whether FPU is available or not.
FloatingPointHelper::Destination destination =
CpuFeatures::IsSupported(FPU) &&
- op != Token::MOD ?
+ op_ != Token::MOD ?
FloatingPointHelper::kFPURegisters :
FloatingPointHelper::kCoreRegisters;
// Allocate new heap number for result.
Register result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
// Load the operands.
if (smi_operands) {
FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
} else {
- // Load right operand to f14 or a2/a3.
- if (right_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, right, destination, f14, f16, a2, a3, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (right_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, right, f14, a2, a3, heap_number_map,
- scratch1, scratch2, fail);
- }
- // Load left operand to f12 or a0/a1. This keeps a0/a1 intact if it
- // jumps to |miss|.
- if (left_type == BinaryOpIC::INT32) {
- FloatingPointHelper::LoadNumberAsInt32Double(
- masm, left, destination, f12, f16, a0, a1, heap_number_map,
- scratch1, scratch2, f2, miss);
- } else {
- Label* fail = (left_type == BinaryOpIC::HEAP_NUMBER) ? miss
- : not_numbers;
- FloatingPointHelper::LoadNumber(
- masm, destination, left, f12, a0, a1, heap_number_map,
- scratch1, scratch2, fail);
- }
+ FloatingPointHelper::LoadOperands(masm,
+ destination,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ not_numbers);
}
// Calculate the result.
@@ -2676,7 +2656,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// f12: Left value.
// f14: Right value.
CpuFeatures::Scope scope(FPU);
- switch (op) {
+ switch (op_) {
case Token::ADD:
__ add_d(f10, f12, f14);
break;
@@ -2702,7 +2682,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
} else {
// Call the C function to handle the double operation.
FloatingPointHelper::CallCCodeForDoubleOperation(masm,
- op,
+ op_,
result,
scratch1);
if (FLAG_debug_code) {
@@ -2742,7 +2722,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
not_numbers);
}
Label result_not_a_smi;
- switch (op) {
+ switch (op_) {
case Token::BIT_OR:
__ Or(a2, a3, Operand(a2));
break;
@@ -2792,9 +2772,8 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
} else {
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, result, heap_number_map, scratch1, scratch2, gc_required,
- mode);
+ GenerateHeapResultAllocation(
+ masm, result, heap_number_map, scratch1, scratch2, gc_required);
}
// a2: Answer as signed int32.
@@ -2809,7 +2788,7 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// mentioned above SHR needs to always produce a positive result.
CpuFeatures::Scope scope(FPU);
__ mtc1(a2, f0);
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ Cvt_d_uw(f0, f0, f22);
} else {
__ cvt_d_w(f0, f0);
@@ -2836,14 +2815,12 @@ void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
// Generate the smi code. If the operation on smis is successful this return is
// generated. If the result is not a smi and heap number allocation is not
// requested, the code falls through. If number allocation is requested but a
-// heap number cannot be allocated the code jumps to the label gc_required.
-void BinaryOpStub_GenerateSmiCode(
+// heap number cannot be allocated the code jumps to the label gc_required.
+void BinaryOpStub::GenerateSmiCode(
MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
- Token::Value op,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- OverwriteMode mode) {
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
Label not_smis;
Register left = a1;
@@ -2856,14 +2833,12 @@ void BinaryOpStub_GenerateSmiCode(
__ JumpIfNotSmi(scratch1, &not_smis);
// If the smi-smi operation results in a smi return is generated.
- BinaryOpStub_GenerateSmiSmiOperation(masm, op);
+ GenerateSmiSmiOperation(masm);
// If heap number results are possible generate the result in an allocated
// heap number.
- if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
- BinaryOpStub_GenerateFPOperation(
- masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
- use_runtime, gc_required, &not_smis, op, mode);
+ if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
+ GenerateFPOperation(masm, true, use_runtime, gc_required);
}
__ bind(&not_smis);
}
@@ -2875,14 +2850,14 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, NULL, op_, NO_HEAPNUMBER_RESULTS, mode_);
+ GenerateSmiCode(masm, &call_runtime, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS,
- mode_);
+ GenerateSmiCode(masm,
+ &call_runtime,
+ &call_runtime,
+ ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -2890,14 +2865,22 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
GenerateTypeTransition(masm);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ // Try to add arguments as strings, otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateAddStrings(masm);
+ GenerateTypeTransition(masm);
+}
+
+
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -2926,7 +2909,7 @@ void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+ ASSERT(operands_type_ == BinaryOpIC::INT32);
Register left = a1;
Register right = a0;
@@ -2949,7 +2932,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Label skip;
__ Or(scratch1, left, right);
__ JumpIfNotSmi(scratch1, &skip);
- BinaryOpStub_GenerateSmiSmiOperation(masm, op_);
+ GenerateSmiSmiOperation(masm);
// Fall through if the result is not a smi.
__ bind(&skip);
@@ -2959,15 +2942,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
case Token::MUL:
case Token::DIV:
case Token::MOD: {
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(left, &transition);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- __ JumpIfNotSmi(right, &transition);
- }
// Load both operands and check that they are 32-bit integer.
// Jump to type transition if they are not. The registers a0 and a1 (right
// and left) are preserved for the runtime call.
@@ -2980,7 +2954,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
right,
destination,
f14,
- f16,
a2,
a3,
heap_number_map,
@@ -2992,7 +2965,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
left,
destination,
f12,
- f16,
t0,
t1,
heap_number_map,
@@ -3029,10 +3001,9 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- scratch1,
+ single_scratch,
f10,
- at,
- f16,
+ scratch1,
except_flag);
if (result_type_ <= BinaryOpIC::INT32) {
@@ -3041,6 +3012,7 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
// Check if the result fits in a smi.
+ __ mfc1(scratch1, single_scratch);
__ Addu(scratch2, scratch1, Operand(0x40000000));
// If not try to return a heap number.
__ Branch(&return_heap_number, lt, scratch2, Operand(zero_reg));
@@ -3066,13 +3038,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
: BinaryOpIC::INT32)) {
// We are using FPU registers so s0 is available.
heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
__ mov(v0, heap_number_result);
__ sdc1(f10, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
@@ -3090,13 +3061,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
// Allocate a heap number to store the result.
heap_number_result = s0;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &pop_and_call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &pop_and_call_runtime);
// Load the left value from the value saved on the stack.
__ Pop(a1, a0);
@@ -3135,7 +3105,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
- f2,
&transition);
FloatingPointHelper::LoadNumberAsInt32(masm,
right,
@@ -3145,7 +3114,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
scratch2,
scratch3,
f0,
- f2,
&transition);
// The ECMA-262 standard specifies that, for shift operations, only the
@@ -3207,13 +3175,12 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
__ bind(&return_heap_number);
heap_number_result = t1;
- BinaryOpStub_GenerateHeapResultAllocation(masm,
- heap_number_result,
- heap_number_map,
- scratch1,
- scratch2,
- &call_runtime,
- mode_);
+ GenerateHeapResultAllocation(masm,
+ heap_number_result,
+ heap_number_map,
+ scratch1,
+ scratch2,
+ &call_runtime);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3257,7 +3224,6 @@ void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3296,32 +3262,20 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
- Label call_runtime, transition;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &transition, &call_runtime, &transition, op_, mode_);
-
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ Label call_runtime;
+ GenerateFPOperation(masm, false, &call_runtime, &call_runtime);
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
- Label call_runtime, call_string_add_or_runtime, transition;
-
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_);
+ Label call_runtime, call_string_add_or_runtime;
- BinaryOpStub_GenerateFPOperation(
- masm, left_type_, right_type_, false,
- &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_);
+ GenerateSmiCode(masm, &call_runtime, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- __ bind(&transition);
- GenerateTypeTransition(masm);
+ GenerateFPOperation(masm, false, &call_string_add_or_runtime, &call_runtime);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
@@ -3329,7 +3283,6 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
GenerateCallRuntime(masm);
}
@@ -3365,20 +3318,63 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
-void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Register result,
- Register heap_number_map,
- Register scratch1,
- Register scratch2,
- Label* gc_required,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::GenerateHeapResultAllocation(
+ MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required) {
+
// Code below will scratch result if allocation fails. To keep both arguments
// intact for the runtime call result cannot be one of these.
ASSERT(!result.is(a0) && !result.is(a1));
- if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+ if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
Label skip_allocation, allocated;
- Register overwritable_operand = mode == OVERWRITE_LEFT ? a1 : a0;
+ Register overwritable_operand = mode_ == OVERWRITE_LEFT ? a1 : a0;
// If the overwritable operand is already an object, we skip the
// allocation of a heap number.
__ JumpIfNotSmi(overwritable_operand, &skip_allocation);
@@ -3391,7 +3387,7 @@ void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
__ mov(result, overwritable_operand);
__ bind(&allocated);
} else {
- ASSERT(mode == NO_OVERWRITE);
+ ASSERT(mode_ == NO_OVERWRITE);
__ AllocateHeapNumber(
result, scratch1, scratch2, heap_number_map, gc_required);
}
@@ -3712,10 +3708,9 @@ void MathPowStub::Generate(MacroAssembler* masm) {
Label int_exponent_convert;
// Detect integer exponents stored as double.
__ EmitFPUTruncate(kRoundToMinusInf,
- scratch,
+ single_scratch,
double_exponent,
- at,
- double_scratch,
+ scratch,
scratch2,
kCheckForInexactConversion);
// scratch2 == 0 means there was no conversion error.
@@ -3773,7 +3768,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ push(ra);
{
AllowExternalCallThatCantCauseGC scope(masm);
- __ PrepareCallCFunction(0, 2, scratch2);
+ __ PrepareCallCFunction(0, 2, scratch);
__ SetCallCDoubleArguments(double_base, double_exponent);
__ CallCFunction(
ExternalReference::power_double_double_function(masm->isolate()),
@@ -3784,6 +3779,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ jmp(&done);
__ bind(&int_exponent_convert);
+ __ mfc1(scratch, single_scratch);
}
// Calculate power with integer exponent.
@@ -5087,7 +5083,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// regexp_data: RegExp data (FixedArray)
// a0: Instance type of subject string
STATIC_ASSERT(kStringEncodingMask == 4);
- STATIC_ASSERT(kOneByteStringTag == 4);
+ STATIC_ASSERT(kAsciiStringTag == 4);
STATIC_ASSERT(kTwoByteStringTag == 0);
// Find the code object based on the assumptions above.
__ And(a0, a0, Operand(kStringEncodingMask)); // Non-zero for ASCII.
@@ -5326,7 +5322,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
__ lw(subject,
FieldMemOperand(subject, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Subu(subject,
subject,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -5603,6 +5599,45 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
}
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+void CompareStub::PrintName(StringStream* stream) {
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ const char* cc_name;
+ switch (cc_) {
+ case lt: cc_name = "LT"; break;
+ case gt: cc_name = "GT"; break;
+ case le: cc_name = "LE"; break;
+ case ge: cc_name = "GE"; break;
+ case eq: cc_name = "EQ"; break;
+ case ne: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+ bool is_equality = cc_ == eq || cc_ == ne;
+ stream->Add("CompareStub_%s", cc_name);
+ stream->Add(lhs_.is(a0) ? "_a0" : "_a1");
+ stream->Add(rhs_.is(a0) ? "_a0" : "_a1");
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
+}
+
+
+int CompareStub::MinorKey() {
+ // Encode the two parameters in a unique 16 bit value.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 14));
+ ASSERT((lhs_.is(a0) && rhs_.is(a1)) ||
+ (lhs_.is(a1) && rhs_.is(a0)));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(lhs_.is(a0))
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
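+// For reference: MinorKey() packs every parameter that distinguishes one
+// generated stub from another into a single small integer, later used as a
+// code-cache key. The same shift-and-mask encoding as an illustrative sketch
+// (field widths mirror the code above; not V8's actual BitField templates):
+//
+//   struct CompareKey {
+//     unsigned condition;  // Condition code, must fit in 14 bits.
+//     bool lhs_is_a0;      // Which register holds the left operand.
+//     bool strict;         // Strict equality semantics.
+//   };
+//
+//   uint32_t EncodeKey(const CompareKey& key) {
+//     return (key.condition & 0x3FFF) |
+//            (static_cast<uint32_t>(key.lhs_is_a0) << 14) |
+//            (static_cast<uint32_t>(key.strict) << 15);
+//   }
+//
+//   CompareKey DecodeKey(uint32_t bits) {
+//     CompareKey key;
+//     key.condition = bits & 0x3FFF;
+//     key.lhs_is_a0 = ((bits >> 14) & 1) != 0;
+//     key.strict = ((bits >> 15) & 1) != 0;
+//     return key;
+//   }
+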
// StringCharCodeAtGenerator.
void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
Label flat_string;
@@ -6036,7 +6071,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
// Check if the two characters match.
// Assumes that word load is little endian.
- __ lhu(scratch, FieldMemOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ lhu(scratch, FieldMemOperand(candidate, SeqAsciiString::kHeaderSize));
__ Branch(&found_in_symbol_table, eq, chars, Operand(scratch));
__ bind(&next_probe[i]);
}
@@ -6215,7 +6250,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
@@ -6253,12 +6288,12 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ bind(&sequential_string);
// Locate first character of underlying subject string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ Addu(t1, t1, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
+ __ Addu(t1, t1, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ bind(&allocate_result);
// Sequential ASCII string. Allocate the result.
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ And(t0, a1, Operand(kStringEncodingMask));
__ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
@@ -6269,13 +6304,13 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ Addu(t1, t1, a3);
// Locate first character of result.
- __ Addu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(a1, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// v0: result string
// a1: first character of result string
// a2: result string length
// t1: first character of substring to copy
- STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ STATIC_ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
StringHelper::GenerateCopyCharactersLong(
masm, a1, t1, a2, a3, t0, t2, t3, t4, COPY_ASCII | DEST_ALWAYS_ALIGNED);
__ jmp(&return_v0);
@@ -6407,7 +6442,7 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiUntag(length);
__ Addu(scratch1, length,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ Addu(left, left, Operand(scratch1));
__ Addu(right, right, Operand(scratch1));
__ Subu(length, zero_reg, length);
@@ -6562,8 +6597,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ lbu(a2, FieldMemOperand(a0, SeqOneByteString::kHeaderSize));
- __ lbu(a3, FieldMemOperand(a1, SeqOneByteString::kHeaderSize));
+ __ lbu(a2, FieldMemOperand(a0, SeqAsciiString::kHeaderSize));
+ __ lbu(a3, FieldMemOperand(a1, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -6581,7 +6616,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// in a little endian mode).
__ li(t2, Operand(2));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ sh(a2, FieldMemOperand(v0, SeqOneByteString::kHeaderSize));
+ __ sh(a2, FieldMemOperand(v0, SeqAsciiString::kHeaderSize));
__ IncrementCounter(counters->string_add_native(), 1, a2, a3);
__ DropAndRet(2);
@@ -6629,6 +6664,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ and_(at, at, t1);
__ Branch(&ascii_data, ne, at, Operand(zero_reg));
+ __ xor_(t0, t0, t1);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ And(t0, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+ __ Branch(&ascii_data, eq, t0, Operand(kAsciiStringTag | kAsciiDataHintTag));
+
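
(The four added instructions extend the ascii_data fast path: after the earlier branch has already handled the case where both strings are ASCII, XORing the two instance-type words and masking leaves both bits set exactly when the operands differ on both positions, e.g. one side is ASCII and the other is two-byte but carries the ASCII-data hint. A plain C++ restatement of the bit trick, with hypothetical tag values:

    #include <cassert>
    #include <cstdint>

    // Hypothetical tag values for illustration only; the real constants
    // live in v8's objects.h.
    const uint32_t kAsciiStringTag   = 1u << 2;
    const uint32_t kAsciiDataHintTag = 1u << 3;

    // Mirrors the xor/and/compare sequence above: taken exactly when the
    // two instance-type words differ in both bits.
    bool MixedAsciiPair(uint32_t t0, uint32_t t1) {
      uint32_t x = (t0 ^ t1) & (kAsciiStringTag | kAsciiDataHintTag);
      return x == (kAsciiStringTag | kAsciiDataHintTag);
    }

    int main() {
      assert(MixedAsciiPair(kAsciiStringTag, kAsciiDataHintTag));
      assert(!MixedAsciiPair(kAsciiStringTag, kAsciiStringTag));
      assert(!MixedAsciiPair(0, kAsciiDataHintTag));
      return 0;
    }
)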
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(v0, t2, t0, t1, &call_runtime);
__ Branch(&allocated);
@@ -6660,11 +6700,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t0, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_first_add;
__ Branch(&skip_first_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &first_prepared);
- __ addiu(t3, a0, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ addiu(t3, a0, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_first_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6675,11 +6715,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSeqStringTag == 0);
__ And(t4, t1, Operand(kStringRepresentationMask));
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
Label skip_second_add;
__ Branch(&skip_second_add, ne, t4, Operand(zero_reg));
__ Branch(USE_DELAY_SLOT, &second_prepared);
- __ addiu(a1, a1, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ addiu(a1, a1, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ bind(&skip_second_add);
// External string: rule out short external string and load string resource.
STATIC_ASSERT(kShortExternalStringTag != 0);
@@ -6700,7 +6740,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ Branch(&non_ascii_string_add_flat_result, eq, t4, Operand(zero_reg));
__ AllocateAsciiString(v0, t2, t0, t1, t5, &call_runtime);
- __ Addu(t2, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ Addu(t2, v0, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// v0: result string.
// t3: first character of first string.
// a1: first character of second string
@@ -6788,7 +6828,7 @@ void StringAddStub::GenerateConvertArgument(MacroAssembler* masm,
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ Or(a2, a1, a0);
__ JumpIfNotSmi(a2, &miss);
@@ -6810,18 +6850,18 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBER);
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
+ __ And(a2, a1, Operand(a0));
+ __ JumpIfSmi(a2, &generic_stub);
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a1, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(a0, &miss);
- }
+ __ GetObjectType(a0, a2, a2);
+ __ Branch(&maybe_undefined1, ne, a2, Operand(HEAP_NUMBER_TYPE));
+ __ GetObjectType(a1, a2, a2);
+ __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
// Inlining the double comparison and falling back to the general compare
// stub if NaN is involved or FPU is unsupported.
@@ -6829,33 +6869,10 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
CpuFeatures::Scope scope(FPU);
// Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(a0, &right_smi);
- __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
- DONT_DO_SMI_CHECK);
- __ Subu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&left);
- __ bind(&right_smi);
- __ SmiUntag(a2, a0); // Can't clobber a0 yet.
- FPURegister single_scratch = f6;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f2, single_scratch);
-
- __ bind(&left);
- __ JumpIfSmi(a1, &left_smi);
- __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
- DONT_DO_SMI_CHECK);
__ Subu(a2, a1, Operand(kHeapObjectTag));
__ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
- __ Branch(&done);
- __ bind(&left_smi);
- __ SmiUntag(a2, a1); // Can't clobber a1 yet.
- single_scratch = f8;
- __ mtc1(a2, single_scratch);
- __ cvt_d_w(f0, single_scratch);
-
- __ bind(&done);
+ __ Subu(a2, a0, Operand(kHeapObjectTag));
+ __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
// Return a result of -1, 0, or 1, or use CompareStub for NaNs.
Label fpu_eq, fpu_lt;
@@ -6879,16 +6896,15 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
}
__ bind(&unordered);
+
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
__ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&miss, ne, a0, Operand(at));
- __ JumpIfSmi(a1, &unordered);
__ GetObjectType(a1, a2, a2);
__ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
__ jmp(&unordered);
@@ -6906,7 +6922,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOL);
+ ASSERT(state_ == CompareIC::SYMBOLS);
Label miss;
// Registers containing left and right operands respectively.
@@ -6944,7 +6960,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -7029,7 +7045,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
__ And(a2, a1, Operand(a0));
__ JumpIfSmi(a2, &miss);
@@ -7584,7 +7600,12 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(a0));
__ Move(address, regs_.address());
__ Move(a0, regs_.object());
- __ Move(a1, address);
+ if (mode == INCREMENTAL_COMPACTION) {
+ __ Move(a1, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ lw(a1, MemOperand(address, 0));
+ }
__ li(a2, Operand(ExternalReference::isolate_address()));
AllowExternalCallThatCantCauseGC scope(masm);
@@ -7746,7 +7767,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
// Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
__ bind(&double_elements);
__ lw(t1, FieldMemOperand(a1, JSObject::kElementsOffset));
- __ StoreNumberToDoubleElements(a0, a3,
+ __ StoreNumberToDoubleElements(a0, a3, a1,
// Overwrites all regs after this.
t1, t2, t3, t5, a2,
&slow_elements);
diff --git a/deps/v8/src/mips/code-stubs-mips.h b/deps/v8/src/mips/code-stubs-mips.h
index b560c63e0..e0954d837 100644
--- a/deps/v8/src/mips/code-stubs-mips.h
+++ b/deps/v8/src/mips/code-stubs-mips.h
@@ -143,6 +143,108 @@ class UnaryOpStub: public CodeStub {
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ use_fpu_ = CpuFeatures::IsSupported(FPU);
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ use_fpu_(FPUBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+ bool use_fpu_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 16 bits RRRTTTVOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class FPUBits: public BitField<bool, 9, 1> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | FPUBits::encode(use_fpu_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiSmiOperation(MacroAssembler* masm);
+ void GenerateFPOperation(MacroAssembler* masm,
+ bool smi_operands,
+ Label* not_numbers,
+ Label* gc_required);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* use_runtime,
+ Label* gc_required,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+ void GenerateAddStrings(MacroAssembler* masm);
+ void GenerateCallRuntime(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm,
+ Register result,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* gc_required);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
+
+
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
@@ -622,6 +724,20 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2);
+ // Loads objects from a0 and a1 (right and left in binary operations) into
+ // floating point registers. Depending on the destination the values end up
+ // either in f14 and f12 or in a2/a3 and a0/a1, respectively. If the
+ // destination is floating point registers, FPU must be supported. If core
+ // registers are requested when FPU is supported, f12 and f14 will still be
+ // scratched. If either a0 or a1 is not a number (not a smi and not a heap
+ // number object), the not_number label is jumped to with a0 and a1 intact.
+ static void LoadOperands(MacroAssembler* masm,
+ FloatingPointHelper::Destination destination,
+ Register heap_number_map,
+ Register scratch1,
+ Register scratch2,
+ Label* not_number);
+
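
(LoadOperands and its Destination parameter let one stub body target both hardware-float and soft-float call sequences. A rough model of the distinction, treating FPU registers as a double and core register pairs as two 32-bit halves; the register names in the comments are only mnemonic:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative model of FloatingPointHelper::Destination: the same load
    // helper either leaves a value in an FPU register (modelled as a double)
    // or splits it into two core registers (modelled as two uint32s).
    enum Destination { kFPURegisters, kCoreRegisters };

    void LoadNumber(double value, Destination destination,
                    double* fpu_reg, uint32_t* core_lo, uint32_t* core_hi) {
      if (destination == kFPURegisters) {
        *fpu_reg = value;  // e.g. ends up in f12/f14
      } else {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof bits);
        *core_lo = static_cast<uint32_t>(bits);        // e.g. a0/a2
        *core_hi = static_cast<uint32_t>(bits >> 32);  // e.g. a1/a3
      }
    }

    int main() {
      double f = 0; uint32_t lo = 0, hi = 0;
      LoadNumber(1.5, kCoreRegisters, &f, &lo, &hi);
      std::printf("%08x %08x\n", (unsigned)hi, (unsigned)lo);
      return 0;  // prints 3ff80000 00000000 for 1.5
    }
)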
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
@@ -657,7 +773,6 @@ class FloatingPointHelper : public AllStatic {
Register object,
Destination destination,
FPURegister double_dst,
- FPURegister double_scratch,
Register dst1,
Register dst2,
Register heap_number_map,
@@ -679,8 +794,7 @@ class FloatingPointHelper : public AllStatic {
Register scratch1,
Register scratch2,
Register scratch3,
- FPURegister double_scratch0,
- FPURegister double_scratch1,
+ FPURegister double_scratch,
Label* not_int32);
// Generate non FPU code to check if a double can be exactly represented by a
@@ -720,12 +834,7 @@ class FloatingPointHelper : public AllStatic {
Register heap_number_result,
Register scratch);
- // Loads the objects from |object| into floating point registers.
- // Depending on |destination| the value ends up either in |dst| or
- // in |dst1|/|dst2|. If |destination| is kFPURegisters, then FPU
- // must be supported. If kCoreRegisters are requested and FPU is
- // supported, |dst| will be scratched. If |object| is neither smi nor
- // heap number, |not_number| is jumped to with |object| still intact.
+ private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
diff --git a/deps/v8/src/mips/codegen-mips.cc b/deps/v8/src/mips/codegen-mips.cc
index db313e10e..44e0359e4 100644
--- a/deps/v8/src/mips/codegen-mips.cc
+++ b/deps/v8/src/mips/codegen-mips.cc
@@ -31,11 +31,11 @@
#include "codegen.h"
#include "macro-assembler.h"
-#include "simulator-mips.h"
namespace v8 {
namespace internal {
+#define __ ACCESS_MASM(masm)
UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
switch (type) {
@@ -49,74 +49,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
-#define __ masm.
-
-
-#if defined(USE_SIMULATOR)
-byte* fast_exp_mips_machine_code = NULL;
-double fast_exp_simulator(double x) {
- return Simulator::current(Isolate::Current())->CallFP(
- fast_exp_mips_machine_code, x, 0);
-}
-#endif
-
-
-UnaryMathFunction CreateExpFunction() {
- if (!CpuFeatures::IsSupported(FPU)) return &exp;
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
-
- {
- CpuFeatures::Scope use_fpu(FPU);
- DoubleRegister input = f12;
- DoubleRegister result = f0;
- DoubleRegister double_scratch1 = f4;
- DoubleRegister double_scratch2 = f6;
- Register temp1 = t0;
- Register temp2 = t1;
- Register temp3 = t2;
-
- if (!IsMipsSoftFloatABI) {
- // Input value is in f12 anyway, nothing to do.
- } else {
- __ Move(input, a0, a1);
- }
- __ Push(temp3, temp2, temp1);
- MathExpGenerator::EmitMathExp(
- &masm, input, result, double_scratch1, double_scratch2,
- temp1, temp2, temp3);
- __ Pop(temp3, temp2, temp1);
- if (!IsMipsSoftFloatABI) {
- // Result is already in f0, nothing to do.
- } else {
- __ Move(a0, a1, result);
- }
- __ Ret();
- }
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
-
-#if !defined(USE_SIMULATOR)
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-#else
- fast_exp_mips_machine_code = buffer;
- return &fast_exp_simulator;
-#endif
-}
-
-
-#undef __
-
-
UnaryMathFunction CreateSqrtFunction() {
return &sqrt;
}
@@ -140,8 +72,6 @@ void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
// -------------------------------------------------------------------------
// Code generators
-#define __ ACCESS_MASM(masm)
-
void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm) {
// ----------- S t a t e -------------
@@ -246,7 +176,7 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
HeapObject::kMapOffset,
a3,
t5,
- kRAHasNotBeenSaved,
+ kRAHasBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
@@ -478,7 +408,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ Branch(&external_string, ne, at, Operand(zero_reg));
// Prepare sequential strings
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ Addu(string,
string,
SeqTwoByteString::kHeaderSize - kHeapObjectTag);
@@ -516,196 +446,6 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ bind(&done);
}
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ And(at, index, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi index", at, Operand(zero_reg));
- __ And(at, value, Operand(kSmiTagMask));
- __ Check(eq, "Non-smi value", at, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, String::kLengthOffset));
- __ Check(lt, "Index is too large", index, Operand(at));
-
- __ Check(ge, "Index is negative", index, Operand(zero_reg));
-
- __ lw(at, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
-
- __ And(at, at, Operand(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ Subu(at, at, Operand(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(eq, "Unexpected string type", at, Operand(zero_reg));
- }
-
- __ Addu(at,
- string,
- Operand(SeqString::kHeaderSize - kHeapObjectTag));
- __ SmiUntag(value);
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ SmiUntag(index);
- __ Addu(at, at, index);
- __ sb(value, MemOperand(at));
- } else {
- // No need to untag a smi for two-byte addressing.
- __ Addu(at, at, index);
- __ sh(value, MemOperand(at));
- }
-}
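
(One detail worth noting in the generator removed above: the index is untagged only on the one-byte path because a smi already stores value << 1, which on the two-byte path is exactly the byte offset of a 16-bit character. A quick check of that arithmetic, using the 32-bit v8 smi tag size:

    #include <cassert>
    #include <cstdint>

    const int kSmiTagSize = 1;  // 32-bit v8: a smi stores value << 1

    int main() {
      int32_t index = 7;
      int32_t smi = index << kSmiTagSize;
      assert((smi >> kSmiTagSize) == index);  // one-byte: untag, stride 1
      assert(smi == index * 2);               // two-byte: smi is the offset
      return 0;
    }
)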
-
-
-static MemOperand ExpConstant(int index, Register base) {
- return MemOperand(base, index * kDoubleSize);
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch1));
- ASSERT(!input.is(double_scratch2));
- ASSERT(!result.is(double_scratch1));
- ASSERT(!result.is(double_scratch2));
- ASSERT(!double_scratch1.is(double_scratch2));
- ASSERT(!temp1.is(temp2));
- ASSERT(!temp1.is(temp3));
- ASSERT(!temp2.is(temp3));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
-
- __ ldc1(double_scratch1, ExpConstant(0, temp3));
- __ Move(result, kDoubleRegZero);
- __ BranchF(&done, NULL, ge, double_scratch1, input);
- __ ldc1(double_scratch2, ExpConstant(1, temp3));
- __ ldc1(result, ExpConstant(2, temp3));
- __ BranchF(&done, NULL, ge, input, double_scratch2);
- __ ldc1(double_scratch1, ExpConstant(3, temp3));
- __ ldc1(result, ExpConstant(4, temp3));
- __ mul_d(double_scratch1, double_scratch1, input);
- __ add_d(double_scratch1, double_scratch1, result);
- __ Move(temp2, temp1, double_scratch1);
- __ sub_d(double_scratch1, double_scratch1, result);
- __ ldc1(result, ExpConstant(6, temp3));
- __ ldc1(double_scratch2, ExpConstant(5, temp3));
- __ mul_d(double_scratch1, double_scratch1, double_scratch2);
- __ sub_d(double_scratch1, double_scratch1, input);
- __ sub_d(result, result, double_scratch1);
- __ mul_d(input, double_scratch1, double_scratch1);
- __ mul_d(result, result, input);
- __ srl(temp1, temp2, 11);
- __ ldc1(double_scratch2, ExpConstant(7, temp3));
- __ mul_d(result, result, double_scratch2);
- __ sub_d(result, result, double_scratch1);
- __ ldc1(double_scratch2, ExpConstant(8, temp3));
- __ add_d(result, result, double_scratch2);
- __ li(at, 0x7ff);
- __ And(temp2, temp2, at);
- __ Addu(temp1, temp1, Operand(0x3ff));
- __ sll(temp1, temp1, 20);
-
- // Must not call ExpConstant() after overwriting temp3!
- __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ sll(at, temp2, 3);
- __ addu(at, at, temp3);
- __ lw(at, MemOperand(at));
- __ Addu(temp3, temp3, Operand(kPointerSize));
- __ sll(temp2, temp2, 3);
- __ addu(temp2, temp2, temp3);
- __ lw(temp2, MemOperand(temp2));
- __ Or(temp1, temp1, temp2);
- __ Move(input, at, temp1);
- __ mul_d(result, result, input);
- __ bind(&done);
-}
-
-
-// nop(CODE_AGE_MARKER_NOP)
-static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found in FUNCTIONS
- static bool initialized = false;
- static uint32_t sequence[kNoCodeAgeSequenceLength];
- byte* byte_sequence = reinterpret_cast<byte*>(sequence);
- *length = kNoCodeAgeSequenceLength * Assembler::kInstrSize;
- if (!initialized) {
- CodePatcher patcher(byte_sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->Push(ra, fp, cp, a1);
- patcher.masm()->LoadRoot(at, Heap::kUndefinedValueRootIndex);
- patcher.masm()->Addu(fp, sp, Operand(2 * kPointerSize));
- initialized = true;
- }
- return byte_sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = !memcmp(sequence, young_sequence, young_length);
- ASSERT(result ||
- Memory::uint32_at(sequence) == kCodeAgePatchFirstInstruction);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- Address target_address = Memory::Address_at(
- sequence + Assembler::kInstrSize * (kNoCodeAgeSequenceLength - 1));
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
- // Mark this code sequence for FindPlatformCodeAgeSequence()
- patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
- // Save the function's original return address
- // (it will be clobbered by Call(t9))
- patcher.masm()->mov(at, ra);
- // Load the stub address to t9 and call it
- patcher.masm()->li(t9,
- Operand(reinterpret_cast<uint32_t>(stub->instruction_start())));
- patcher.masm()->Call(t9);
- // Record the stub address in the empty space for GetCodeAgeAndParity()
- patcher.masm()->dd(reinterpret_cast<uint32_t>(stub->instruction_start()));
- }
-}
-
-
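
(The aging machinery deleted above works by byte-comparing a function's prologue against the canonical young sequence and, when aging, overwriting it with a call to an age-specific stub whose address is parked right after the call for GetCodeAgeAndParity to read back. A conceptual model of the young/old test, with made-up sizes:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const int kNoCodeAgeSequenceLength = 3;  // made-up instruction count
    const int kInstrSize = 4;

    bool IsYoungSequence(const uint8_t* seq, const uint8_t* young) {
      return std::memcmp(seq, young,
                         kNoCodeAgeSequenceLength * kInstrSize) == 0;
    }

    int main() {
      uint8_t young[12] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
      uint8_t patched[12] = {0};  // pretend the prologue was overwritten
      std::printf("%d %d\n", IsYoungSequence(young, young),
                  IsYoungSequence(patched, young));  // prints "1 0"
      return 0;
    }
)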
#undef __
} } // namespace v8::internal
diff --git a/deps/v8/src/mips/codegen-mips.h b/deps/v8/src/mips/codegen-mips.h
index 0ed2414a0..e704c4f56 100644
--- a/deps/v8/src/mips/codegen-mips.h
+++ b/deps/v8/src/mips/codegen-mips.h
@@ -90,22 +90,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- DoubleRegister input,
- DoubleRegister result,
- DoubleRegister double_scratch1,
- DoubleRegister double_scratch2,
- Register temp1,
- Register temp2,
- Register temp3);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal
#endif // V8_MIPS_CODEGEN_MIPS_H_
diff --git a/deps/v8/src/mips/deoptimizer-mips.cc b/deps/v8/src/mips/deoptimizer-mips.cc
index e8ed9ccf7..9fd815bb4 100644
--- a/deps/v8/src/mips/deoptimizer-mips.cc
+++ b/deps/v8/src/mips/deoptimizer-mips.cc
@@ -120,7 +120,7 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
- // This structure comes from FullCodeGenerator::EmitBackEdgeBookkeeping.
+ // This structure comes from FullCodeGenerator::EmitStackCheck.
// The call of the stack guard check has the following form:
// sltu at, sp, t0 / slt at, a3, zero_reg (in case of count based interrupts)
// beq at, zero_reg, ok
@@ -170,7 +170,11 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
// Restore the sltu instruction so beq can be taken again.
CodePatcher patcher(pc_after - 6 * kInstrSize, 1);
- patcher.masm()->slt(at, a3, zero_reg);
+ if (FLAG_count_based_interrupts) {
+ patcher.masm()->slt(at, a3, zero_reg);
+ } else {
+ patcher.masm()->sltu(at, sp, t0);
+ }
// Replace the on-stack replacement address in the load-immediate (lui/ori
// pair) with the entry address of the normal stack-check code.
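
(The restored branch selects between two guard conditions, which is why the patcher must emit a different compare depending on FLAG_count_based_interrupts. Modelled in plain C++, with registers as ordinary integers:

    #include <cassert>
    #include <cstdint>

    // sltu at, sp, t0: interrupt when the stack pointer drops below the
    // stack limit.
    bool StackGuard(uintptr_t sp, uintptr_t stack_limit) {
      return sp < stack_limit;
    }

    // slt at, a3, zero_reg: interrupt when the profiling counter goes
    // negative.
    bool CounterGuard(int32_t profiling_counter) {
      return profiling_counter < 0;
    }

    int main() {
      assert(StackGuard(0x1000, 0x2000));
      assert(!CounterGuard(5) && CounterGuard(-1));
      return 0;
    }
)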
diff --git a/deps/v8/src/mips/full-codegen-mips.cc b/deps/v8/src/mips/full-codegen-mips.cc
index 0835bf20a..3e89fb43b 100644
--- a/deps/v8/src/mips/full-codegen-mips.cc
+++ b/deps/v8/src/mips/full-codegen-mips.cc
@@ -139,7 +139,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -172,13 +172,12 @@ void FullCodeGenerator::Generate() {
int locals_count = info->scope()->num_stack_slots();
- info->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Load undefined value here, so the value is ready for the loop
- // below.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ if (locals_count > 0) {
+ // Load undefined value here, so the value is ready for the loop
+ // below.
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ }
// Adjust fp to point to caller's fp.
__ Addu(fp, sp, Operand(2 * kPointerSize));
@@ -346,34 +345,45 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
// The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
// to make sure it is constant. Branch may emit a skip-or-jump sequence
// instead of the normal Branch. It seems that the "skip" part of that
// sequence is about as long as this Branch would be so it is safe to ignore
// that.
Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+ Comment cmnt(masm_, "[ Stack check");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ slt(at, a3, zero_reg);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ LoadRoot(t0, Heap::kStackLimitRootIndex);
+ __ sltu(at, sp, t0);
+ __ beq(at, zero_reg, &ok);
+ // CallStub will emit a li t9 first, so it is safe to use the delay slot.
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ slt(at, a3, zero_reg);
- __ beq(at, zero_reg, &ok);
- // CallStub will emit a li t9 first, so it is safe to use the delay slot.
- InterruptStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
- EmitProfilingCounterReset();
+ RecordStackCheck(stmt->OsrEntryId());
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
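
(In the count-based branch, the weight scales the profiling-counter decrement with the loop body's code size, so short hot loops and long loops trip the InterruptStub after comparable amounts of executed code. A worked restatement of the clamped formula; the constants are illustrative, the real ones live in full-codegen.h:

    #include <algorithm>
    #include <cstdio>

    const int kMaxBackEdgeWeight = 127;     // illustrative
    const int kBackEdgeDistanceUnit = 142;  // illustrative

    int BackEdgeWeight(int distance_in_bytes) {
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance_in_bytes / kBackEdgeDistanceUnit));
    }

    int main() {
      std::printf("%d %d %d\n",
                  BackEdgeWeight(0),         // 1: tiny loops still decrement
                  BackEdgeWeight(1420),      // 10
                  BackEdgeWeight(1 << 20));  // clamped to 127
      return 0;
    }
)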
@@ -919,33 +929,34 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());
- // Load instance object.
- __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
- __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }
- // Assign it.
- __ sw(a1, ContextOperand(cp, variable->index()));
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- a1,
- a3,
- kRAHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ li(a1, Operand(instance));
+ __ sw(a1, ContextOperand(cp, variable->index()));
+ Visit(declaration->module());
+ break;
+ }
- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}
@@ -988,14 +999,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1248,7 +1251,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ Addu(a0, a0, Operand(Smi::FromInt(1)));
__ push(a0);
- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ Branch(&loop);
// Remove the pointers stored on the stack.
@@ -1396,9 +1399,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ lw(v0, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
__ subu(at, v0, at); // Sub as compare: at == 0 on eq.
if (local->mode() == CONST) {
@@ -2399,7 +2402,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the
// call. Then we call the resolved function using the given
@@ -3146,38 +3149,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(a2);
- __ pop(a1);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, v0, a1, a2);
- context()->Plug(v0);
-}
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3685,7 +3656,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
- __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(string, SeqAsciiString::kLengthOffset));
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
__ BranchOnOverflow(&bailout, scratch3);
__ Branch(&loop, lt, element, Operand(elements_end));
@@ -3712,7 +3683,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times array_length) - separator length to the
// string_length to get the length of the result string. array_length is not a
// smi but the other values are, so the result is a smi.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ Subu(string_length, string_length, Operand(scratch1));
__ Mult(array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
@@ -3752,10 +3723,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
array_length = no_reg;
__ Addu(result_pos,
result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ lw(scratch1, FieldMemOperand(separator, SeqAsciiString::kLengthOffset));
__ li(at, Operand(Smi::FromInt(1)));
__ Branch(&one_char_separator, eq, scratch1, Operand(at));
__ Branch(&long_separator, gt, scratch1, Operand(at));
@@ -3772,7 +3743,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
@@ -3782,7 +3753,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// One-character separator case.
__ bind(&one_char_separator);
// Replace separator with its ASCII character value.
- __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ __ lbu(separator, FieldMemOperand(separator, SeqAsciiString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator.
__ jmp(&one_char_separator_loop_entry);
@@ -3804,7 +3775,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
@@ -3825,7 +3796,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiUntag(string_length);
__ Addu(string,
separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag));
__ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
@@ -3833,7 +3804,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ Addu(element, element, kPointerSize);
__ lw(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+ __ Addu(string, string, SeqAsciiString::kHeaderSize - kHeapObjectTag);
__ CopyBytes(string, result_pos, string_length, scratch1);
// End while (element < elements_end).
__ Branch(&long_separator_loop, lt, element, Operand(elements_end));
@@ -4129,8 +4100,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
JumpPatchSite patch_site(masm_);
int count_value = expr->op() == Token::INC ? 1 : -1;
+ __ li(a1, Operand(Smi::FromInt(count_value)));
+
if (ShouldInlineSmiCase(expr->op())) {
- __ li(a1, Operand(Smi::FromInt(count_value)));
__ AdduAndCheckForOverflow(v0, a0, a1, t0);
__ BranchOnOverflow(&stub_call, t0); // Do stub on overflow.
@@ -4139,8 +4111,6 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitJumpIfSmi(v0, &done);
__ bind(&stub_call);
}
- __ mov(a1, a0);
- __ li(a0, Operand(Smi::FromInt(count_value)));
// Record position before stub call.
SetSourcePosition(expr->position());
@@ -4363,7 +4333,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
+ Condition cc = eq;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cc = eq;
+ break;
+ case Token::LT:
+ cc = lt;
+ break;
+ case Token::GT:
+ cc = gt;
+ break;
+ case Token::LTE:
+ cc = le;
+ break;
+ case Token::GTE:
+ cc = ge;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ mov(a0, result_register());
__ pop(a1);
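
(The switch reintroduced here is a hand-inlined copy of the token-to-condition mapping that the newer tree centralizes in CompareIC::ComputeCondition. The mapping itself, restated as a self-contained function:

    #include <cassert>

    enum Condition { eq, lt, gt, le, ge };
    enum Token { EQ, EQ_STRICT, LT, GT, LTE, GTE };

    Condition ComputeCondition(Token op) {
      switch (op) {
        case EQ: case EQ_STRICT: return eq;
        case LT:  return lt;
        case GT:  return gt;
        case LTE: return le;
        case GTE: return ge;
      }
      return eq;  // unreachable
    }

    int main() {
      assert(ComputeCondition(LTE) == le);
      return 0;
    }
)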
diff --git a/deps/v8/src/mips/ic-mips.cc b/deps/v8/src/mips/ic-mips.cc
index 4ac92aff1..cf706815e 100644
--- a/deps/v8/src/mips/ic-mips.cc
+++ b/deps/v8/src/mips/ic-mips.cc
@@ -1268,6 +1268,7 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&fast_double_without_map_check);
__ StoreNumberToDoubleElements(value,
key,
+ receiver,
elements, // Overwritten.
a3, // Scratch regs...
t0,
@@ -1694,16 +1695,36 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-bool CompareIC::HasInlinedSmiCode(Address address) {
- // The address of the instruction following the call.
- Address andi_instruction_address =
- address + Assembler::kCallTargetAddressOffset;
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+ State state = TargetState(previous_state, false, x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS, a1, a0);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
- // If the instruction following the call is not a andi at, rx, #yyy, nothing
- // was inlined.
- Instr instr = Assembler::instr_at(andi_instruction_address);
- return Assembler::IsAndImmediate(instr) &&
- Assembler::GetRt(instr) == (uint32_t)zero_reg.code();
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
}
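
(UpdateCaches is the compare IC's miss handler: each miss computes a, possibly more general, state for the observed operands, builds the matching stub, and installs it, with GENERIC as the terminal state handled by the old-style CompareStub. A toy model of that one-way ladder; the real transition logic in TargetState considers many more cases:

    #include <cstdio>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, STRINGS, OBJECTS,
                 GENERIC };

    State TargetState(State previous, bool both_smis, bool both_numbers) {
      if (previous == UNINITIALIZED && both_smis) return SMIS;
      if (both_numbers) return HEAP_NUMBERS;  // SMIS generalizes to numbers
      return GENERIC;                         // anything else gives up
    }

    int main() {
      State s = UNINITIALIZED;
      s = TargetState(s, true, true);    // 1 < 2    -> SMIS
      s = TargetState(s, false, true);   // 1.5 < 2  -> HEAP_NUMBERS
      s = TargetState(s, false, false);  // "a" < {} -> GENERIC
      std::printf("%d\n", s == GENERIC);
      return 0;
    }
)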
diff --git a/deps/v8/src/mips/lithium-codegen-mips.cc b/deps/v8/src/mips/lithium-codegen-mips.cc
index cc589e0b3..4c2182bdb 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.cc
+++ b/deps/v8/src/mips/lithium-codegen-mips.cc
@@ -144,13 +144,7 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- info()->set_prologue_offset(masm_->pc_offset());
- // The following three instructions must remain together and unmodified for
- // code aging to work properly.
__ Push(ra, fp, cp, a1);
- // Add unused load of ip to ensure prologue sequence is identical for
- // full-codegen and lithium-codegen.
- __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Addu(fp, sp, Operand(2 * kPointerSize)); // Adj. FP to point to saved FP.
// Reserve space for the stack slots needed by the code.
@@ -227,30 +221,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
instr->CompileToNative(this);
}
}
@@ -1169,9 +1140,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
// No need to mask the right operand on MIPS, it is built into the variable
// shift instructions.
switch (instr->op()) {
- case Token::ROR:
- __ Ror(result, left, Operand(ToRegister(right_op)));
- break;
case Token::SAR:
__ srav(result, left, ToRegister(right_op));
break;
@@ -1193,13 +1161,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right_op));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ Ror(result, left, Operand(shift_count));
- } else {
- __ Move(result, left);
- }
- break;
case Token::SAR:
if (shift_count != 0) {
__ sra(result, left, shift_count);
@@ -1389,15 +1350,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
void LCodeGen::DoBitNotI(LBitNotI* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
@@ -2670,89 +2622,50 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
- Register key = no_reg;
- ElementsKind elements_kind = instr->elements_kind();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(elements_kind);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- int additional_offset = instr->additional_index() << element_size_shift;
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register elements = ToRegister(instr->elements());
+ Register result = ToRegister(instr->result());
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
- elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- FPURegister result = ToDoubleRegister(instr->result());
- if (key_is_constant) {
- __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ if (instr->key()->IsConstantOperand()) {
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ Register key = EmitLoadRegister(instr->key(), scratch);
+ // Even though the HLoadKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
} else {
- __ sll(scratch0(), key, shift_size);
- __ Addu(scratch0(), scratch0(), external_pointer);
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
}
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ lw(result, FieldMemOperand(store_base, offset));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), additional_offset));
- __ cvt_d_s(result, result);
- } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), additional_offset));
- }
- } else {
- Register result = ToRegister(instr->result());
- MemOperand mem_operand = PrepareKeyedOperand(
- key, external_pointer, key_is_constant, constant_key,
- element_size_shift, shift_size,
- instr->additional_index(), additional_offset);
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ lb(result, mem_operand);
- break;
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ lbu(result, mem_operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ lh(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ lhu(result, mem_operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ lw(result, mem_operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ lw(result, mem_operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- DeoptimizeIf(Ugreater_equal, instr->environment(),
- result, Operand(0x80000000));
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ __ And(scratch, result, Operand(kSmiTagMask));
+ DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+ } else {
+ __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
}
}
}
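
(Both the constant and the register paths above reduce to base plus a scaled index; the only subtlety is that a tagged (smi) key is already doubled, so it takes one less shift than an untagged integer key. Checking the arithmetic stand-alone, with the 32-bit v8 constants:

    #include <cassert>
    #include <cstdint>

    const int kPointerSizeLog2 = 2;  // 32-bit v8: 4-byte pointers
    const int kSmiTagSize = 1;       // smis store value << 1

    uint32_t OffsetFromSmiKey(uint32_t smi_key) {
      return smi_key << (kPointerSizeLog2 - kSmiTagSize);  // sll key, 1
    }
    uint32_t OffsetFromIntKey(uint32_t int_key) {
      return int_key << kPointerSizeLog2;                  // sll key, 2
    }

    int main() {
      uint32_t index = 5;
      assert(OffsetFromSmiKey(index << kSmiTagSize) ==
             OffsetFromIntKey(index));
      return 0;
    }
)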
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
Register elements = ToRegister(instr->elements());
bool key_is_constant = instr->key()->IsConstantOperand();
Register key = no_reg;
@@ -2794,59 +2707,6 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
}
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register elements = ToRegister(instr->elements());
- Register result = ToRegister(instr->result());
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- if (instr->key()->IsConstantOperand()) {
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- Register key = EmitLoadRegister(instr->key(), scratch0());
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ lw(result, FieldMemOperand(store_base, offset));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- __ And(scratch, result, Operand(kSmiTagMask));
- DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
- } else {
- __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
MemOperand LCodeGen::PrepareKeyedOperand(Register key,
Register base,
bool key_is_constant,
@@ -2891,6 +2751,89 @@ MemOperand LCodeGen::PrepareKeyedOperand(Register key,
}
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ Register external_pointer = ToRegister(instr->external_pointer());
+ Register key = no_reg;
+ ElementsKind elements_kind = instr->elements_kind();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(elements_kind);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ int additional_offset = instr->additional_index() << element_size_shift;
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
+ elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ FPURegister result = ToDoubleRegister(instr->result());
+ if (key_is_constant) {
+ __ Addu(scratch0(), external_pointer, constant_key << element_size_shift);
+ } else {
+ __ sll(scratch0(), key, shift_size);
+ __ Addu(scratch0(), scratch0(), external_pointer);
+ }
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ __ lwc1(result, MemOperand(scratch0(), additional_offset));
+ __ cvt_d_s(result, result);
+ } else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
+ __ ldc1(result, MemOperand(scratch0(), additional_offset));
+ }
+ } else {
+ Register result = ToRegister(instr->result());
+ MemOperand mem_operand = PrepareKeyedOperand(
+ key, external_pointer, key_is_constant, constant_key,
+ element_size_shift, shift_size,
+ instr->additional_index(), additional_offset);
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ lb(result, mem_operand);
+ break;
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ lbu(result, mem_operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ lh(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ lhu(result, mem_operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ lw(result, mem_operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ DeoptimizeIf(Ugreater_equal, instr->environment(),
+ result, Operand(0x80000000));
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a1));
ASSERT(ToRegister(instr->key()).is(a0));
@@ -3271,19 +3214,22 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
+ FPURegister single_scratch = double_scratch0().low();
Register scratch1 = scratch0();
Register except_flag = ToRegister(instr->temp());
__ EmitFPUTruncate(kRoundToMinusInf,
- result,
+ single_scratch,
input,
scratch1,
- double_scratch0(),
except_flag);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ // Load the result.
+ __ mfc1(result, single_scratch);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
Label done;
@@ -3299,7 +3245,6 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
DoubleRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
Register scratch = scratch0();
Label done, check_sign_on_zero;
@@ -3351,15 +3296,17 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
Register except_flag = scratch;
+
__ EmitFPUTruncate(kRoundToMinusInf,
- result,
+ double_scratch0().low(),
double_scratch0(),
- at,
- double_scratch1,
+ result,
except_flag);
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ __ mfc1(result, double_scratch0().low());
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
// Test for -0.
__ Branch(&done, ne, result, Operand(zero_reg));
@@ -3509,20 +3456,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- DoubleRegister input = ToDoubleRegister(instr->value());
- DoubleRegister result = ToDoubleRegister(instr->result());
- DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
- DoubleRegister double_scratch2 = double_scratch0();
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(
- masm(), input, result, double_scratch1, double_scratch2,
- temp1, temp2, scratch0());
-}
-
-
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(f4));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3804,8 +3737,108 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- Register external_pointer = ToRegister(instr->elements());
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
+ Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+ Register scratch = scratch0();
+ Register store_base = scratch;
+ int offset = 0;
+
+ // Do the store.
+ if (instr->key()->IsConstantOperand()) {
+ ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+ LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+ offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
+ instr->additional_index());
+ store_base = elements;
+ } else {
+ // Even though the HStoreKeyedFastElement instruction forces the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bounds check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
+ __ addu(scratch, elements, scratch);
+ } else {
+ __ sll(scratch, key, kPointerSizeLog2);
+ __ addu(scratch, elements, scratch);
+ }
+ offset = FixedArray::OffsetOfElementAt(instr->additional_index());
+ }
+ __ sw(value, FieldMemOperand(store_base, offset));
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
+ __ RecordWrite(elements,
+ key,
+ value,
+ kRAHasBeenSaved,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
+ }
+}
+
+
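The write barrier above omits the inline smi check only when hydrogen has
already proved the stored value is a heap object; smis never need a
barrier. A small sketch of that selection (illustrative stand-ins, not the
real V8 declarations):

enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };

SmiCheck BarrierSmiCheck(bool value_is_known_heap_object) {
  // A smi is never a heap pointer, so the runtime check exists only to
  // skip the barrier when the type is not known statically.
  return value_is_known_heap_object ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
}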
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
+ DoubleRegister value = ToDoubleRegister(instr->value());
+ Register elements = ToRegister(instr->elements());
+ Register key = no_reg;
+ Register scratch = scratch0();
+ bool key_is_constant = instr->key()->IsConstantOperand();
+ int constant_key = 0;
+ Label not_nan;
+
+ // Calculate the effective address of the slot in the array to store the
+ // double value.
+ if (key_is_constant) {
+ constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+ if (constant_key & 0xF0000000) {
+ Abort("array index constant value too big.");
+ }
+ } else {
+ key = ToRegister(instr->key());
+ }
+ int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+ int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
+ ? (element_size_shift - kSmiTagSize) : element_size_shift;
+ if (key_is_constant) {
+ __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
+ FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ } else {
+ __ sll(scratch, key, shift_size);
+ __ Addu(scratch, elements, Operand(scratch));
+ __ Addu(scratch, scratch,
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+ }
+
+ if (instr->NeedsCanonicalization()) {
+ Label is_nan;
+ // Check for NaN. All NaNs must be canonicalized.
+ __ BranchF(NULL, &is_nan, eq, value, value);
+ __ Branch(&not_nan);
+
+ // Only load the canonical NaN if the comparison above detected a NaN.
+ __ bind(&is_nan);
+ __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+ }
+
+ __ bind(&not_nan);
+ __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
+ element_size_shift));
+}
+
+
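The canonicalization branch relies on NaN never comparing equal to itself,
which is exactly what BranchF(eq, value, value) tests. A plain-C++ sketch
of the rule; the standard quiet NaN stands in for
FixedDoubleArray::canonical_not_the_hole_nan_as_double():

#include <limits>

double CanonicalizeForFastDoubleArray(double value) {
  if (value != value) {  // true exactly when value is some NaN
    // Storing one canonical NaN keeps the hole sentinel's bit pattern
    // unambiguous among the doubles in the backing store.
    return std::numeric_limits<double>::quiet_NaN();
  }
  return value;
}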
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+
+ Register external_pointer = ToRegister(instr->external_pointer());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
bool key_is_constant = instr->key()->IsConstantOperand();
@@ -3876,117 +3909,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
}
}
-
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
- DoubleRegister value = ToDoubleRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = no_reg;
- Register scratch = scratch0();
- bool key_is_constant = instr->key()->IsConstantOperand();
- int constant_key = 0;
- Label not_nan;
-
- // Calculate the effective address of the slot in the array to store the
- // double value.
- if (key_is_constant) {
- constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
- if (constant_key & 0xF0000000) {
- Abort("array index constant value too big.");
- }
- } else {
- key = ToRegister(instr->key());
- }
- int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
- int shift_size = (instr->hydrogen()->key()->representation().IsTagged())
- ? (element_size_shift - kSmiTagSize) : element_size_shift;
- if (key_is_constant) {
- __ Addu(scratch, elements, Operand((constant_key << element_size_shift) +
- FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- } else {
- __ sll(scratch, key, shift_size);
- __ Addu(scratch, elements, Operand(scratch));
- __ Addu(scratch, scratch,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
- }
-
- if (instr->NeedsCanonicalization()) {
- Label is_nan;
- // Check for NaN. All NaNs must be canonicalized.
- __ BranchF(NULL, &is_nan, eq, value, value);
- __ Branch(&not_nan);
-
- // Only load canonical NaN if the comparison above set the overflow.
- __ bind(&is_nan);
- __ Move(value, FixedDoubleArray::canonical_not_the_hole_nan_as_double());
- }
-
- __ bind(&not_nan);
- __ sdc1(value, MemOperand(scratch, instr->additional_index() <<
- element_size_shift));
-}
-
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
- : no_reg;
- Register scratch = scratch0();
- Register store_base = scratch;
- int offset = 0;
-
- // Do the store.
- if (instr->key()->IsConstantOperand()) {
- ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
- LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
- offset = FixedArray::OffsetOfElementAt(ToInteger32(const_operand) +
- instr->additional_index());
- store_base = elements;
- } else {
- // Even though the HLoadKeyed instruction forces the input
- // representation for the key to be an integer, the input gets replaced
- // during bound check elimination with the index argument to the bounds
- // check, which can be tagged, so that case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ sll(scratch, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(scratch, elements, scratch);
- } else {
- __ sll(scratch, key, kPointerSizeLog2);
- __ addu(scratch, elements, scratch);
- }
- offset = FixedArray::OffsetOfElementAt(instr->additional_index());
- }
- __ sw(value, FieldMemOperand(store_base, offset));
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- __ Addu(key, store_base, Operand(offset - kHeapObjectTag));
- __ RecordWrite(elements,
- key,
- value,
- kRAHasBeenSaved,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- // By cases: external, fast double
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(a2));
ASSERT(ToRegister(instr->key()).is(a1));
@@ -4016,7 +3938,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
__ li(new_map_reg, Operand(to_map));
- if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+ if (IsFastSmiElementsKind(from_kind) && IsFastObjectElementsKind(to_kind)) {
__ sw(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
@@ -4268,7 +4190,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
if (FLAG_inline_new) {
__ LoadRoot(t2, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t1, a3, t0, t2, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(t1, a3, t0, t2, &slow);
__ Move(dst, t1);
__ Branch(&done);
}
@@ -4282,13 +4204,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ StoreToSafepointRegisterSlot(zero_reg, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, v0);
- __ Subu(dst, dst, kHeapObjectTag);
// Done. Store the double in dbl_scratch into the value field of the
// allocated heap number.
__ bind(&done);
- __ sdc1(dbl_scratch, MemOperand(dst, HeapNumber::kValueOffset));
- __ Addu(dst, dst, kHeapObjectTag);
+ __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
__ StoreToSafepointRegisterSlot(dst, dst);
}
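The edits above replace the untagged-address sequence (allocate with
DONT_TAG_RESULT, store through MemOperand, then add kHeapObjectTag) with a
tagged allocation plus FieldMemOperand. The identity being used, as a
sketch (kHeapObjectTag is 1 on V8's 32-bit ports):

#include <cstdint>

intptr_t FieldAddress(intptr_t tagged_object, int byte_offset) {
  const intptr_t kHeapObjectTag = 1;  // low bit marks a heap pointer
  // FieldMemOperand(obj, off) addresses obj + off - kHeapObjectTag.
  return tagged_object + byte_offset - kHeapObjectTag;
}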
@@ -4313,16 +4233,12 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
if (FLAG_inline_new) {
__ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
- // We want the untagged address first for performance
- __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
- DONT_TAG_RESULT);
+ __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
} else {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
- // Now that we have finished with the object's real address tag it
- __ Addu(reg, reg, kHeapObjectTag);
+ __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4335,7 +4251,6 @@ void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
- __ Subu(v0, v0, kHeapObjectTag);
__ StoreToSafepointRegisterSlot(v0, reg);
}
@@ -4417,7 +4332,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_scratch = double_scratch0();
- DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
+ FPURegister single_scratch = double_scratch.low();
ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
@@ -4433,7 +4348,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch.low();
+ DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp3());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
!scratch3.is(scratch2));
@@ -4468,16 +4383,18 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToZero,
- input_reg,
+ single_scratch,
double_scratch,
scratch1,
- double_scratch2,
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed.
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+ // Load the result.
+ __ mfc1(input_reg, single_scratch);
+
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Branch(&done, ne, input_reg, Operand(zero_reg));
@@ -4539,10 +4456,10 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register scratch1 = scratch0();
Register scratch2 = ToRegister(instr->temp());
DoubleRegister double_input = ToDoubleRegister(instr->value());
+ FPURegister single_scratch = double_scratch0().low();
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
- FPURegister single_scratch = double_scratch0().low();
__ EmitECMATruncate(result_reg,
double_input,
single_scratch,
@@ -4553,15 +4470,17 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
Register except_flag = scratch2;
__ EmitFPUTruncate(kRoundToMinusInf,
- result_reg,
+ single_scratch,
double_input,
scratch1,
- double_scratch0(),
except_flag,
kCheckForInexactConversion);
// Deopt if the operation did not succeed (except_flag != 0).
DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+
+ // Load the result.
+ __ mfc1(result_reg, single_scratch);
}
}
@@ -4718,7 +4637,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register temp1 = ToRegister(instr->temp());
Register temp2 = ToRegister(instr->temp2());
diff --git a/deps/v8/src/mips/lithium-codegen-mips.h b/deps/v8/src/mips/lithium-codegen-mips.h
index 7363eb8ef..38c5255a4 100644
--- a/deps/v8/src/mips/lithium-codegen-mips.h
+++ b/deps/v8/src/mips/lithium-codegen-mips.h
@@ -376,12 +376,6 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt();
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
diff --git a/deps/v8/src/mips/lithium-mips.cc b/deps/v8/src/mips/lithium-mips.cc
index 521b38d0c..0b6dcaea5 100644
--- a/deps/v8/src/mips/lithium-mips.cc
+++ b/deps/v8/src/mips/lithium-mips.cc
@@ -177,7 +177,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
case Token::SHL: return "sll-t";
case Token::SAR: return "sra-t";
case Token::SHR: return "srl-t";
@@ -297,11 +296,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -378,27 +372,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
+ stream->Add("] <- ");
value()->PrintTo(stream);
}
@@ -715,13 +702,15 @@ LInstruction* LChunkBuilder::DoShift(Token::Value op,
right = UseRegisterAtStart(right_value);
}
- // Shift operations can only deoptimize if we do a logical shift
- // by 0 and the result cannot be truncated to int32.
bool does_deopt = false;
- if (op == Token::SHR && constant_value == 0) {
- if (FLAG_opt_safe_uint32_operations) {
- does_deopt = !instr->CheckFlag(HInstruction::kUint32);
- } else {
+
+ if (FLAG_opt_safe_uint32_operations) {
+ does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+ } else {
+ // Shift operations can only deoptimize if we do a logical shift
+ // by 0 and the result cannot be truncated to int32.
+ bool may_deopt = (op == Token::SHR && constant_value == 0);
+ if (may_deopt) {
for (HUseIterator it(instr->uses()); !it.Done(); it.Advance()) {
if (!it.value()->CheckFlag(HValue::kTruncatingToInt32)) {
does_deopt = true;
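The restructured condition keeps the original rule: only a logical shift
right by zero can deoptimize, because it reinterprets its input as a
uint32. An illustrative sketch:

#include <cstdint>

bool ShrByZeroNeedsDeopt(uint32_t x) {
  uint32_t result = x >> 0;  // e.g. 0x80000000 stays 2147483648
  // Not representable as an int32, so unless every use truncates to int32
  // (or safe-uint32 handling is enabled) the operation must bail out.
  return result >= 0x80000000u;
}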
@@ -1045,15 +1034,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), f4);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, f4), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* input = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LOperand* double_temp = FixedTemp(f6); // Chosen by fair dice roll.
- LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
- return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
// Input cannot be the same as the result.
// See lithium-codegen-mips.cc::DoMathPowHalf.
@@ -1063,9 +1043,7 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
return DefineFixedDouble(result, f4);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
-
- LOperand* temp = (op == kMathRound) ? FixedTemp(f6) :
- (op == kMathFloor) ? TempRegister() : NULL;
+ LOperand* temp = (op == kMathFloor) ? TempRegister() : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
@@ -1132,11 +1110,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1377,7 +1350,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1531,16 +1504,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- LOperand* value = UseRegister(instr->value());
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineAsRegister(result);
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = UseRegister(instr->length());
@@ -1592,7 +1555,8 @@ LInstruction* LChunkBuilder::DoChange(HChange* instr) {
LOperand* temp1 = TempRegister();
LOperand* temp2 = instr->CanTruncateToInt32() ? TempRegister()
: NULL;
- LOperand* temp3 = FixedTemp(f22);
+ LOperand* temp3 = instr->CanTruncateToInt32() ? FixedTemp(f22)
+ : NULL;
res = DefineSameAsFirst(new(zone()) LTaggedToI(value,
temp1,
temp2,
@@ -1664,10 +1628,10 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp1 = TempRegister();
+ LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
- LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
- return AssignEnvironment(Define(result, temp1));
+ LInstruction* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
+ return AssignEnvironment(result);
}
@@ -1836,40 +1800,53 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
+ LOperand* obj = UseRegisterAtStart(instr->object());
LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LLoadKeyedFastElement* result = new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
+}
- if (!instr->is_external()) {
- LOperand* obj = NULL;
- if (instr->representation().IsDouble()) {
- obj = UseTempRegister(instr->elements());
- } else {
- ASSERT(instr->representation().IsTagged());
- obj = UseRegisterAtStart(instr->elements());
- }
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
- DefineAsRegister(result);
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* elements = UseTempRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ LOperand* key = UseRegisterOrConstant(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, so make
// sure it has an environment.
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
- return can_deoptimize ? AssignEnvironment(result) : result;
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
@@ -1883,49 +1860,66 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* val = NULL;
- LOperand* key = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- key = UseRegisterOrConstantAtStart(instr->key());
- val = UseTempRegister(instr->value());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = needs_write_barrier ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ LOperand* key = needs_write_barrier
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
- return new(zone()) LStoreKeyed(object, key, val);
- }
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
+ LOperand* key = UseRegisterOrConstant(instr->key());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key,
+ val);
}
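UseTempRegister for pixel and float stores presumably lets the generated
code rewrite the value in place before storing it: clamping to [0, 255]
for pixels, narrowing double to float for float arrays. A sketch of the
pixel clamp under that assumption:

#include <cstdint>

uint8_t ClampToUint8(int32_t v) {
  if (v < 0) return 0;      // negative values saturate to 0
  if (v > 255) return 255;  // large values saturate to 255
  return static_cast<uint8_t>(v);
}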
@@ -2148,7 +2142,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
+ for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
diff --git a/deps/v8/src/mips/lithium-mips.h b/deps/v8/src/mips/lithium-mips.h
index b2ed72a56..3a9aa7acc 100644
--- a/deps/v8/src/mips/lithium-mips.h
+++ b/deps/v8/src/mips/lithium-mips.h
@@ -125,13 +125,14 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
- V(MathExp) \
V(MathMinMax) \
V(ModI) \
V(MulI) \
@@ -148,7 +149,6 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
- V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -156,8 +156,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -618,7 +620,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->representation().IsDouble();
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -643,30 +645,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 3> {
- public:
- LMathExp(LOperand* value,
- LOperand* double_temp,
- LOperand* temp1,
- LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- temps_[2] = double_temp;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
- LOperand* double_temp() { return temps_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1144,30 +1122,6 @@ class LDateField: public LTemplateInstruction<1, 1, 1> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1383,26 +1337,59 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
}
- bool is_external() const {
- return hydrogen()->is_external();
+
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const {
+ return hydrogen()->elements_kind();
+ }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1916,28 +1903,51 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
- LOperand* elements() { return inputs_[0]; }
+ LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const {
- return hydrogen()->elements_kind();
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = value;
}
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
- bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+
+ bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
};
@@ -1961,6 +1971,28 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
StrictModeFlag strict_mode_flag() { return hydrogen()->strict_mode_flag(); }
};
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
class LTransitionElementsKind: public LTemplateInstruction<1, 1, 2> {
public:
@@ -2083,7 +2115,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
diff --git a/deps/v8/src/mips/macro-assembler-mips.cc b/deps/v8/src/mips/macro-assembler-mips.cc
index 9249917c3..052387ab0 100644
--- a/deps/v8/src/mips/macro-assembler-mips.cc
+++ b/deps/v8/src/mips/macro-assembler-mips.cc
@@ -1395,68 +1395,49 @@ void MacroAssembler::ConvertToInt32(Register source,
void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
+ FPURegister result,
DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
+ Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact) {
- ASSERT(!result.is(scratch));
- ASSERT(!double_input.is(double_scratch));
- ASSERT(!except_flag.is(scratch));
-
ASSERT(CpuFeatures::IsSupported(FPU));
CpuFeatures::Scope scope(FPU);
- Label done;
-
- // Clear the except flag (0 = no exception)
- mov(except_flag, zero_reg);
-
- // Test for values that can be exactly represented as a signed 32-bit integer.
- cvt_w_d(double_scratch, double_input);
- mfc1(result, double_scratch);
- cvt_d_w(double_scratch, double_scratch);
- BranchF(&done, NULL, eq, double_input, double_scratch);
int32_t except_mask = kFCSRFlagMask; // Assume interested in all exceptions.
if (check_inexact == kDontCheckForInexactConversion) {
- // Ignore inexact exceptions.
+ // Ignore inexact exceptions.
except_mask &= ~kFCSRInexactFlagMask;
}
// Save FCSR.
- cfc1(scratch, FCSR);
+ cfc1(scratch1, FCSR);
// Disable FPU exceptions.
ctc1(zero_reg, FCSR);
// Do operation based on rounding mode.
switch (rounding_mode) {
case kRoundToNearest:
- Round_w_d(double_scratch, double_input);
+ Round_w_d(result, double_input);
break;
case kRoundToZero:
- Trunc_w_d(double_scratch, double_input);
+ Trunc_w_d(result, double_input);
break;
case kRoundToPlusInf:
- Ceil_w_d(double_scratch, double_input);
+ Ceil_w_d(result, double_input);
break;
case kRoundToMinusInf:
- Floor_w_d(double_scratch, double_input);
+ Floor_w_d(result, double_input);
break;
} // End of switch-statement.
// Retrieve FCSR.
cfc1(except_flag, FCSR);
// Restore FCSR.
- ctc1(scratch, FCSR);
- // Move the converted value into the result register.
- mfc1(result, double_scratch);
+ ctc1(scratch1, FCSR);
// Check for fpu exceptions.
And(except_flag, except_flag, Operand(except_mask));
-
- bind(&done);
}
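With this signature the truncated word is left in an FPU register, and the
caller is responsible for moving it out; DoMathFloor and DoDoubleToI above
follow the same pattern. A usage fragment in the surrounding assembler DSL
(the register names and the bailout label are assumed context):

__ EmitFPUTruncate(kRoundToMinusInf,
                   single_scratch,   // FPURegister receiving the result
                   double_input,
                   scratch1,         // GP scratch, saves/restores FCSR
                   except_flag);     // masked FCSR bits, zero on success
__ Branch(&bailout, ne, except_flag, Operand(zero_reg));
__ mfc1(result, single_scratch);    // fetch the 32-bit integer result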
@@ -3128,9 +3109,9 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string
// while observing object alignment.
- ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+ ASSERT((SeqAsciiString::kHeaderSize & kObjectAlignmentMask) == 0);
ASSERT(kCharSize == 1);
- addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+ addiu(scratch1, length, kObjectAlignmentMask + SeqAsciiString::kHeaderSize);
And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
// Allocate ASCII string in new space.
@@ -3234,8 +3215,7 @@ void MacroAssembler::AllocateHeapNumber(Register result,
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* need_gc,
- TaggingMode tagging_mode) {
+ Label* need_gc) {
// Allocate an object in the heap for the heap number and tag it as a heap
// object.
AllocateInNewSpace(HeapNumber::kSize,
@@ -3243,16 +3223,11 @@ void MacroAssembler::AllocateHeapNumber(Register result,
scratch1,
scratch2,
need_gc,
- tagging_mode == TAG_RESULT ? TAG_OBJECT :
- NO_ALLOCATION_FLAGS);
+ TAG_OBJECT);
// Store heap number map in the allocated object.
AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- if (tagging_mode == TAG_RESULT) {
- sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
- } else {
- sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
- }
+ sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
}
@@ -3405,13 +3380,13 @@ void MacroAssembler::CheckFastSmiElements(Register map,
void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset) {
+ Label* fail) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@@ -3437,10 +3412,8 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&have_double_value);
sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, elements_reg);
- sw(mantissa_reg, FieldMemOperand(
- scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
- uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
- sizeof(kHoleNanLower32);
+ sw(mantissa_reg, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize));
+ uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
sw(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
@@ -3460,8 +3433,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
bind(&smi_value);
Addu(scratch1, elements_reg,
- Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
- elements_offset));
+ Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
Addu(scratch1, scratch1, scratch2);
// scratch1 is now effective address of the double element
@@ -3976,14 +3948,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
Addu(s2, s2, Operand(1));
sw(s2, MemOperand(s3, kLevelOffset));
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// The O32 ABI requires us to pass a pointer in a0 where the returned struct
// (4 bytes) will be placed. This is also built into the Simulator.
// Set up the pointer to the returned value (a0). It was allocated in
@@ -3996,14 +3960,6 @@ void MacroAssembler::CallApiFunctionAndReturn(ExternalReference function,
DirectCEntryStub stub;
stub.GenerateCall(this, function);
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0, a0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// As mentioned above, on MIPS a pointer is returned - we need to dereference
// it to get the actual return value (which is also a pointer).
lw(v0, MemOperand(v0));
@@ -4940,10 +4896,8 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
Register scratch2,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
ASSERT(kFlatAsciiStringTag <= 0xffff); // Ensure this fits 16-bit immed.
andi(scratch1, first, kFlatAsciiStringMask);
Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
@@ -4956,10 +4910,8 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
Register scratch,
Label* failure) {
int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
int kFlatAsciiStringTag = ASCII_STRING_TYPE;
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
And(scratch, type, Operand(kFlatAsciiStringMask));
Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
}
@@ -5320,7 +5272,7 @@ void MacroAssembler::EnsureNotWhite(
// For ASCII (char-size of 1) we shift the smi tag away to get the length.
// For UC16 (char-size of 2) we just leave the smi tag in place, thereby
// getting the length multiplied by 2.
- ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+ ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4);
ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
lw(t9, FieldMemOperand(value, String::kLengthOffset));
And(t8, instance_type, Operand(kStringEncodingMask));
diff --git a/deps/v8/src/mips/macro-assembler-mips.h b/deps/v8/src/mips/macro-assembler-mips.h
index 474772e0b..b57e51486 100644
--- a/deps/v8/src/mips/macro-assembler-mips.h
+++ b/deps/v8/src/mips/macro-assembler-mips.h
@@ -65,14 +65,6 @@ enum AllocationFlags {
SIZE_IN_WORDS = 1 << 2
};
-// Flags used for AllocateHeapNumber
-enum TaggingMode {
- // Tag the result.
- TAG_RESULT,
- // Don't tag
- DONT_TAG_RESULT
-};
-
// Flags used for the ObjectToDoubleFPURegister function.
enum ObjectToDoubleFlags {
// No special flags.
@@ -544,8 +536,7 @@ class MacroAssembler: public Assembler {
Register scratch1,
Register scratch2,
Register heap_number_map,
- Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT);
+ Label* gc_required);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -629,7 +620,6 @@ class MacroAssembler: public Assembler {
// Push a handle.
void Push(Handle<Object> handle);
- void Push(Smi* smi) { Push(Handle<Smi>(smi)); }
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
@@ -762,16 +752,14 @@ class MacroAssembler: public Assembler {
FPURegister double_scratch,
Label *not_int32);
- // Truncates a double using a specific rounding mode, and writes the value
- // to the result register.
+ // Truncates a double using a specific rounding mode.
// The except_flag will contain any exceptions caused by the instruction.
- // If check_inexact is kDontCheckForInexactConversion, then the inexact
+ // If check_inexact is kDontCheckForInexactConversion, then the inexact
// exception is masked.
void EmitFPUTruncate(FPURoundingMode rounding_mode,
- Register result,
+ FPURegister result,
DoubleRegister double_input,
- Register scratch,
- DoubleRegister double_scratch,
+ Register scratch1,
Register except_flag,
CheckForInexactConversion check_inexact
= kDontCheckForInexactConversion);
@@ -984,14 +972,14 @@ class MacroAssembler: public Assembler {
// case scratch2, scratch3 and scratch4 are unmodified.
void StoreNumberToDoubleElements(Register value_reg,
Register key_reg,
+ Register receiver_reg,
// All regs below here overwritten.
Register elements_reg,
Register scratch1,
Register scratch2,
Register scratch3,
Register scratch4,
- Label* fail,
- int elements_offset = 0);
+ Label* fail);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
diff --git a/deps/v8/src/mips/regexp-macro-assembler-mips.cc b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
index 0dd72de33..672ba0eee 100644
--- a/deps/v8/src/mips/regexp-macro-assembler-mips.cc
+++ b/deps/v8/src/mips/regexp-macro-assembler-mips.cc
@@ -1155,7 +1155,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1186,7 +1186,7 @@ int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/mips/simulator-mips.cc b/deps/v8/src/mips/simulator-mips.cc
index ea359eade..cf87f9360 100644
--- a/deps/v8/src/mips/simulator-mips.cc
+++ b/deps/v8/src/mips/simulator-mips.cc
@@ -1016,13 +1016,6 @@ void Simulator::set_register(int reg, int32_t value) {
}
-void Simulator::set_dw_register(int reg, const int* dbl) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
- registers_[reg] = dbl[0];
- registers_[reg + 1] = dbl[1];
-}
-
-
void Simulator::set_fpu_register(int fpureg, int32_t value) {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
FPUregisters_[fpureg] = value;
@@ -1052,19 +1045,6 @@ int32_t Simulator::get_register(int reg) const {
}
-double Simulator::get_double_from_register_pair(int reg) {
- ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
-
- double dm_val = 0.0;
- // Read the bits from the unsigned integer register_[] array
- // into the double precision floating point value and return it.
- char buffer[2 * sizeof(registers_[0])];
- memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
- memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
- return(dm_val);
-}
-
-
int32_t Simulator::get_fpu_register(int fpureg) const {
ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
return FPUregisters_[fpureg];
@@ -2239,10 +2219,10 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
set_register(HI, static_cast<int32_t>(u64hilo >> 32));
break;
case DIV:
- // Divide by zero and overflow was not checked in the configuration
- // step - div and divu do not raise exceptions. On division by 0 and
- // on overflow (INT_MIN/-1), the result will be UNPREDICTABLE.
- if (rt != 0 && !(rs == INT_MIN && rt == -1)) {
+ // Divide by zero was not checked in the configuration step - div and
+ // divu do not raise exceptions. On division by 0, the result will
+ // be UNPREDICTABLE.
+ if (rt != 0) {
set_register(LO, rs / rt);
set_register(HI, rs % rt);
}
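The narrowed guard only excludes division by zero. Note that INT_MIN / -1
also leaves LO/HI UNPREDICTABLE on hardware, and the corresponding C++
division overflows, which is what the dropped condition protected against.
A sketch of the wider predicate:

#include <climits>
#include <cstdint>

bool DivResultPredictable(int32_t rs, int32_t rt) {
  // rt == 0 and INT_MIN / -1 both yield UNPREDICTABLE results on MIPS;
  // the latter also overflows 32-bit signed arithmetic in the simulator.
  return rt != 0 && !(rs == INT_MIN && rt == -1);
}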
@@ -2738,7 +2718,34 @@ void Simulator::Execute() {
}
-void Simulator::CallInternal(byte* entry) {
+int32_t Simulator::Call(byte* entry, int argument_count, ...) {
+ va_list parameters;
+ va_start(parameters, argument_count);
+ // Set up arguments.
+
+ // First four arguments passed in registers.
+ ASSERT(argument_count >= 4);
+ set_register(a0, va_arg(parameters, int32_t));
+ set_register(a1, va_arg(parameters, int32_t));
+ set_register(a2, va_arg(parameters, int32_t));
+ set_register(a3, va_arg(parameters, int32_t));
+
+ // Remaining arguments passed on stack.
+ int original_stack = get_register(sp);
+ // Compute position of stack on entry to generated code.
+ int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
+ - kCArgsSlotsSize);
+ if (OS::ActivationFrameAlignment() != 0) {
+ entry_stack &= -OS::ActivationFrameAlignment();
+ }
+ // Store remaining arguments on stack, from low to high memory.
+ intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+ for (int i = 4; i < argument_count; i++) {
+ stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
+ }
+ va_end(parameters);
+ set_register(sp, entry_stack);
+
// Prepare to execute the code at entry.
set_register(pc, reinterpret_cast<int32_t>(entry));
// Put down marker for end of simulation. The simulator will stop simulation
@@ -2802,38 +2809,6 @@ void Simulator::CallInternal(byte* entry) {
set_register(gp, gp_val);
set_register(sp, sp_val);
set_register(fp, fp_val);
-}
-
-
-int32_t Simulator::Call(byte* entry, int argument_count, ...) {
- va_list parameters;
- va_start(parameters, argument_count);
- // Set up arguments.
-
- // First four arguments passed in registers.
- ASSERT(argument_count >= 4);
- set_register(a0, va_arg(parameters, int32_t));
- set_register(a1, va_arg(parameters, int32_t));
- set_register(a2, va_arg(parameters, int32_t));
- set_register(a3, va_arg(parameters, int32_t));
-
- // Remaining arguments passed on stack.
- int original_stack = get_register(sp);
- // Compute position of stack on entry to generated code.
- int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
- - kCArgsSlotsSize);
- if (OS::ActivationFrameAlignment() != 0) {
- entry_stack &= -OS::ActivationFrameAlignment();
- }
- // Store remaining arguments on stack, from low to high memory.
- intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
- for (int i = 4; i < argument_count; i++) {
- stack_argument[i - 4 + kCArgSlotCount] = va_arg(parameters, int32_t);
- }
- va_end(parameters);
- set_register(sp, entry_stack);
-
- CallInternal(entry);
// Pop stack passed arguments.
CHECK_EQ(entry_stack, get_register(sp));
@@ -2844,27 +2819,6 @@ int32_t Simulator::Call(byte* entry, int argument_count, ...) {
}
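The stack math in Call() reserves the O32 home slots for a0..a3
(kCArgsSlotsSize, 16 bytes) below the spilled arguments and then aligns
sp. A sketch assuming an 8-byte activation-frame alignment:

#include <cstdint>

int32_t EntryStack(int32_t original_sp, int argument_count) {
  const int32_t kCArgsSlotsSize = 16;  // four O32 home slots, 4 bytes each
  int32_t sp = original_sp - (argument_count - 4) * 4 - kCArgsSlotsSize;
  return sp & -8;  // OS::ActivationFrameAlignment() rounding, assumed 8
}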
-double Simulator::CallFP(byte* entry, double d0, double d1) {
- if (!IsMipsSoftFloatABI) {
- set_fpu_register_double(f12, d0);
- set_fpu_register_double(f14, d1);
- } else {
- int buffer[2];
- ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
- memcpy(buffer, &d0, sizeof(d0));
- set_dw_register(a0, buffer);
- memcpy(buffer, &d1, sizeof(d1));
- set_dw_register(a2, buffer);
- }
- CallInternal(entry);
- if (!IsMipsSoftFloatABI) {
- return get_fpu_register_double(f0);
- } else {
- return get_double_from_register_pair(v0);
- }
-}
-
-
uintptr_t Simulator::PushAddress(uintptr_t address) {
int new_sp = get_register(sp) - sizeof(uintptr_t);
uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
diff --git a/deps/v8/src/mips/simulator-mips.h b/deps/v8/src/mips/simulator-mips.h
index 67f595302..776badc29 100644
--- a/deps/v8/src/mips/simulator-mips.h
+++ b/deps/v8/src/mips/simulator-mips.h
@@ -184,9 +184,7 @@ class Simulator {
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
- void set_dw_register(int dreg, const int* dbl);
int32_t get_register(int reg) const;
- double get_double_from_register_pair(int reg);
// Same for FPURegisters.
void set_fpu_register(int fpureg, int32_t value);
void set_fpu_register_float(int fpureg, float value);
@@ -216,8 +214,6 @@ class Simulator {
// generated RegExp code with 7 parameters. This is a convenience function,
// which sets up the simulator state and grabs the result on return.
int32_t Call(byte* entry, int argument_count, ...);
- // Alternative: call a 2-argument double function.
- double CallFP(byte* entry, double d0, double d1);
// Push an address onto the JS stack.
uintptr_t PushAddress(uintptr_t address);
@@ -357,7 +353,6 @@ class Simulator {
void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
- void CallInternal(byte* entry);
// Architecture state.
// Registers.
diff --git a/deps/v8/src/mips/stub-cache-mips.cc b/deps/v8/src/mips/stub-cache-mips.cc
index 323933b5d..ba1d17722 100644
--- a/deps/v8/src/mips/stub-cache-mips.cc
+++ b/deps/v8/src/mips/stub-cache-mips.cc
@@ -314,23 +314,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index) {
- if (index.is_header_index()) {
- int offset = index.header_index() * kPointerSize;
+ int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
__ lw(dst, FieldMemOperand(src, offset));
} else {
- // Adjust for the number of properties stored in the holder.
- int slot = index.field_index() - holder->map()->inobject_properties();
- if (slot < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (slot * kPointerSize);
- __ lw(dst, FieldMemOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = slot * kPointerSize + FixedArray::kHeaderSize;
- __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
- __ lw(dst, FieldMemOperand(dst, offset));
- }
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
+ __ lw(dst, FieldMemOperand(dst, offset));
}
}
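
Editor's note: the restored GenerateFastPropertyLoad folds both storage locations into one signed index. After subtracting the in-object property count, a negative index means the field lives inside the object itself (at a negative offset from the end of the instance), while a non-negative one indexes the out-of-line properties array. A sketch of just the offset arithmetic, with illustrative stand-ins for the real V8 constants:

    #include <cassert>

    static const int kPointerSize = 4;           // illustrative, 32-bit MIPS
    static const int kFixedArrayHeaderSize = 8;  // illustrative

    struct FieldLocation {
      bool in_object;  // true: load from the object itself
      int offset;      // byte offset within object or properties array
    };

    // Mirrors the index -> byte-offset logic in GenerateFastPropertyLoad.
    static FieldLocation LocateField(int index, int inobject_properties,
                                     int instance_size) {
      index -= inobject_properties;
      if (index < 0) {
        // In-object fields sit at the end of the instance, so a negative
        // index lands inside [0, instance_size).
        return {true, instance_size + index * kPointerSize};
      }
      return {false, index * kPointerSize + kFixedArrayHeaderSize};
    }

    int main() {
      // Two in-object slots, 16-byte instance: field 1 is at offset 12.
      FieldLocation loc = LocateField(1, 2, 16);
      assert(loc.in_object && loc.offset == 12);
      return 0;
    }
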
@@ -1205,7 +1200,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1554,7 +1549,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a2 : name
@@ -1628,7 +1623,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
} else {
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call the builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
+ Label attempt_to_grow_elements;
Register elements = t2;
Register end_elements = t1;
@@ -1639,7 +1634,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ CheckMap(elements,
v0,
Heap::kFixedArrayMapRootIndex,
- &check_double,
+ &call_builtin,
DONT_DO_SMI_CHECK);
// Get the array's length into v0 and calculate new length.
@@ -1655,6 +1650,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Branch(&attempt_to_grow_elements, gt, v0, Operand(t0));
// Check if value is a smi.
+ Label with_write_barrier;
__ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
__ JumpIfNotSmi(t0, &with_write_barrier);
@@ -1675,39 +1671,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Drop(argc + 1);
__ Ret();
- __ bind(&check_double);
-
- // Check that the elements are in fast mode and writable.
- __ CheckMap(elements,
- a0,
- Heap::kFixedDoubleArrayMapRootIndex,
- &call_builtin,
- DONT_DO_SMI_CHECK);
-
- // Get the array's length into r0 and calculate new length.
- __ lw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- STATIC_ASSERT(kSmiTagSize == 1);
- STATIC_ASSERT(kSmiTag == 0);
- __ Addu(a0, a0, Operand(Smi::FromInt(argc)));
-
- // Get the elements' length.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ Branch(&call_builtin, gt, a0, Operand(t0));
-
- __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize));
- __ StoreNumberToDoubleElements(
- t0, a0, elements, a3, t1, a2, t5,
- &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ sw(a0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-
- // Check for a smi.
- __ Drop(argc + 1);
- __ Ret();
-
__ bind(&with_write_barrier);
__ lw(a3, FieldMemOperand(receiver, HeapObject::kMapOffset));
@@ -1719,12 +1682,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(a3, t3, &call_builtin);
-
- __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset));
- __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&call_builtin, eq, t3, Operand(at));
// edx: receiver
- // a3: map
+ // r3: map
Label try_holey_map;
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
@@ -2956,7 +2915,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- a0 : receiver
@@ -3147,7 +3106,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// ----------- S t a t e -------------
// -- ra : return address
// -- a0 : key
@@ -3494,7 +3453,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// t7: undefined
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
__ Check(ne, "Function constructed by construct stub.",
- a3, Operand(JS_FUNCTION_TYPE));
+ a3, Operand(JS_FUNCTION_TYPE));
#endif
// Now allocate the JSObject in new space.
@@ -3502,13 +3461,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
// a1: constructor function
// a2: initial map
// t7: undefined
- ASSERT(function->has_initial_map());
__ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-#ifdef DEBUG
- int instance_size = function->initial_map()->instance_size();
- __ Check(eq, "Instance size of initial map changed.",
- a3, Operand(instance_size >> kPointerSizeLog2));
-#endif
__ AllocateInNewSpace(a3, t4, t5, t6, &generic_stub_call, SIZE_IN_WORDS);
// Allocated the JSObject, now initialize the fields. Map is set to initial
@@ -3571,6 +3524,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -3695,7 +3649,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register scratch0,
Register scratch1,
FPURegister double_scratch0,
- FPURegister double_scratch1,
Label* fail) {
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
@@ -3711,15 +3664,15 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DONT_DO_SMI_CHECK);
__ ldc1(double_scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
__ EmitFPUTruncate(kRoundToZero,
- scratch0,
double_scratch0,
- at,
- double_scratch1,
+ double_scratch0,
+ scratch0,
scratch1,
kCheckForInexactConversion);
__ Branch(fail, ne, scratch1, Operand(zero_reg));
+ __ mfc1(scratch0, double_scratch0);
__ SmiTagCheckOverflow(key, scratch0, scratch1);
__ BranchOnOverflow(fail, scratch1);
__ bind(&key_ok);
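
Editor's note: the SmiTagCheckOverflow step above depends on 31-bit smis on 32-bit MIPS. Tagging shifts the value left by one, and the tag overflows exactly when the shifted result no longer round-trips. A rough scalar model of that check (not the V8 macro itself):

    #include <cstdint>

    // Model of SmiTagCheckOverflow on a 32-bit target: a smi stores a
    // 31-bit payload shifted left by one, with tag bit 0.
    static bool TagAsSmi(int32_t value, int32_t* tagged) {
      // Shift via uint32_t to avoid signed-overflow undefined behavior.
      int32_t shifted =
          static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      if ((shifted >> 1) != value) return false;  // payload did not fit
      *tagged = shifted;
      return true;
    }

    int main() {
      int32_t tagged;
      bool ok = TagAsSmi((1 << 30) - 1, &tagged);   // largest smi: fits
      bool overflow = !TagAsSmi(1 << 30, &tagged);  // one past range: fails
      return (ok && overflow) ? 0 : 1;
    }
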
@@ -3747,7 +3700,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
// a3: elements array
@@ -3847,41 +3800,34 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Ret();
__ bind(&box_int);
+ // Allocate a HeapNumber for the result and perform int-to-double
+ // conversion.
+ // The arm version uses a temporary here to save r0, but we don't need to
+ // (a0 is not modified).
+ __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+ __ AllocateHeapNumber(v0, a3, t0, t1, &slow);
if (CpuFeatures::IsSupported(FPU)) {
CpuFeatures::Scope scope(FPU);
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT);
__ mtc1(value, f0);
__ cvt_d_w(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
- // Allocate a HeapNumber for the result and perform int-to-double
- // conversion.
- // The arm version uses a temporary here to save r0, but we don't need to
- // (a0 is not modified).
- __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT);
- Register dst_mantissa = t2;
- Register dst_exponent = t3;
+ Register dst1 = t2;
+ Register dst2 = t3;
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm,
value,
dest,
f0,
- dst_mantissa,
- dst_exponent,
+ dst1,
+ dst2,
t1,
f2);
- __ sw(dst_mantissa, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
- __ sw(dst_exponent, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+ __ sw(dst1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+ __ sw(dst2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
__ Ret();
}
} else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
@@ -3904,7 +3850,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
// registers - also when jumping due to exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t2, t3, t6, &slow);
// This is replaced by a macro:
// __ mtc1(value, f0); // LS 32-bits.
@@ -3913,9 +3859,8 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
__ Cvt_d_uw(f0, value, f22);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
- __ Addu(v0, v0, kHeapObjectTag);
__ Ret();
} else {
// Check whether unsigned integer fits into smi.
@@ -3948,7 +3893,7 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// clobbers all registers - also when jumping due to exhausted young
// space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(t2, t3, t5, t6, &slow);
__ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
__ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
@@ -3965,19 +3910,17 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The float (single) value is already in fpu reg f0 (if we use float).
__ cvt_d_s(f0, f0);
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// FPU is not available, do manual single to double conversion.
// a2: floating point value (binary32).
@@ -4032,18 +3975,16 @@ void KeyedLoadStubCompiler::GenerateLoadExternalArray(
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
// The double value is already in f0
- __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
- __ Addu(v0, v0, kHeapObjectTag);
+ __ sdc1(f0, FieldMemOperand(v0, HeapNumber::kValueOffset));
__ Ret();
} else {
// Allocate a HeapNumber for the result. Don't use a0 and a1 as
// AllocateHeapNumber clobbers all registers - also when jumping due to
// exhausted young space.
__ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
- __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
+ __ AllocateHeapNumber(v0, t3, t5, t6, &slow);
__ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
__ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
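
Editor's note: most of the churn in the preceding hunks is the TAG_RESULT/DONT_TAG_RESULT distinction. V8 3.15 allocated untagged and added kHeapObjectTag later with Addu, while the restored 3.14.5 AllocateHeapNumber returns an already tagged pointer, so stores go through FieldMemOperand, which simply subtracts the tag again. A small model of that relationship (the helpers are hypothetical; the tag value matches V8's convention):

    #include <cassert>
    #include <cstdint>

    // V8 tags heap pointers by setting the low bit; field accessors on a
    // tagged pointer compensate by subtracting the tag from the offset.
    static const intptr_t kHeapObjectTag = 1;

    static intptr_t Tag(intptr_t raw) { return raw + kHeapObjectTag; }

    // Equivalent of FieldMemOperand(reg, offset) on a tagged pointer.
    static intptr_t FieldAddress(intptr_t tagged, int offset) {
      return tagged + offset - kHeapObjectTag;
    }

    int main() {
      intptr_t raw = 0x1000;       // untagged allocation result
      intptr_t tagged = Tag(raw);  // what 3.14.5 returns directly
      // Writing field 8 through the tagged pointer hits raw + 8 either way.
      assert(FieldAddress(tagged, 8) == raw + 8);
      return 0;
    }
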
@@ -4101,7 +4042,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key, t0, t1, f2, &miss_force_generic);
__ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
@@ -4180,7 +4121,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
}
FloatingPointHelper::ConvertIntToDouble(
masm, t1, destination,
- f0, t2, t3, // These are: double_dst, dst_mantissa, dst_exponent.
+ f0, t2, t3, // These are: double_dst, dst1, dst2.
t0, f2); // These are: scratch2, single_scratch.
if (destination == FloatingPointHelper::kFPURegisters) {
CpuFeatures::Scope scope(FPU);
@@ -4490,7 +4431,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, a0, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, a0, t0, t1, f2, &miss_force_generic);
// Get the elements array.
__ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
@@ -4541,7 +4482,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
// Get the elements array.
__ lw(elements_reg,
@@ -4561,7 +4502,7 @@ void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
// Non-NaN. Allocate a new heap number and copy the double value into it.
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
__ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
- heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
+ heap_number_map, &slow_allocate_heapnumber);
// Don't need to reload the upper 32 bits of the double, it's already in
// scratch.
@@ -4615,7 +4556,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
if (IsFastSmiElementsKind(elements_kind)) {
__ JumpIfNotSmi(value_reg, &transition_elements_kind);
@@ -4759,12 +4700,11 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- a1 : key
// -- a2 : receiver
// -- ra : return address
- // -- a3 : scratch (elements backing store)
+ // -- a3 : scratch
// -- t0 : scratch (elements_reg)
// -- t1 : scratch (mantissa_reg)
// -- t2 : scratch (exponent_reg)
// -- t3 : scratch4
- // -- t4 : scratch
// -----------------------------------
Label miss_force_generic, transition_elements_kind, grow, slow;
Label finish_store, check_capacity;
@@ -4777,14 +4717,13 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
Register scratch2 = t1;
Register scratch3 = t2;
Register scratch4 = t3;
- Register scratch5 = t4;
Register length_reg = t3;
// This stub is meant to be tail-jumped to, the receiver must already
// have been verified by the caller to not be a smi.
// Check that the key is a smi or a heap number convertible to a smi.
- GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
+ GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, &miss_force_generic);
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4808,6 +4747,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ StoreNumberToDoubleElements(value_reg,
key_reg,
+ receiver_reg,
// All registers after this are overwritten.
elements_reg,
scratch1,
@@ -4858,32 +4798,14 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ AllocateInNewSpace(size, elements_reg, scratch1, scratch2, &slow,
TAG_OBJECT);
- // Initialize the new FixedDoubleArray.
+ // Initialize the new FixedDoubleArray. Leave elements uninitialized for
+ // efficiency; they are guaranteed to be initialized before use.
__ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
__ sw(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
__ li(scratch1, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
__ sw(scratch1,
FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
- __ mov(scratch1, elements_reg);
- __ StoreNumberToDoubleElements(value_reg,
- key_reg,
- // All registers after this are overwritten.
- scratch1,
- scratch2,
- scratch3,
- scratch4,
- scratch5,
- &transition_elements_kind);
-
- __ li(scratch1, Operand(kHoleNanLower32));
- __ li(scratch2, Operand(kHoleNanUpper32));
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- int offset = FixedDoubleArray::OffsetOfElementAt(i);
- __ sw(scratch1, FieldMemOperand(elements_reg, offset));
- __ sw(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
- }
-
// Install the new backing store in the JSArray.
__ sw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
@@ -4896,7 +4818,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ sw(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
__ lw(elements_reg,
FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
- __ Ret();
+ __ jmp(&finish_store);
__ bind(&check_capacity);
// Make sure that the backing store can hold additional elements.
diff --git a/deps/v8/src/mirror-debugger.js b/deps/v8/src/mirror-debugger.js
index 7f1a05aed..a5331a014 100644
--- a/deps/v8/src/mirror-debugger.js
+++ b/deps/v8/src/mirror-debugger.js
@@ -1844,14 +1844,10 @@ function ScopeDetails(frame, fun, index) {
frame.details_.frameId(),
frame.details_.inlinedFrameIndex(),
index);
- this.frame_id_ = frame.details_.frameId();
- this.inlined_frame_id_ = frame.details_.inlinedFrameIndex();
} else {
this.details_ = %GetFunctionScopeDetails(fun.value(), index);
- this.fun_value_ = fun.value();
this.break_id_ = undefined;
}
- this.index_ = index;
}
@@ -1871,22 +1867,6 @@ ScopeDetails.prototype.object = function() {
};
-ScopeDetails.prototype.setVariableValueImpl = function(name, new_value) {
- var raw_res;
- if (!IS_UNDEFINED(this.break_id_)) {
- %CheckExecutionState(this.break_id_);
- raw_res = %SetScopeVariableValue(this.break_id_, this.frame_id_,
- this.inlined_frame_id_, this.index_, name, new_value);
- } else {
- raw_res = %SetScopeVariableValue(this.fun_value_, null, null, this.index_,
- name, new_value);
- }
- if (!raw_res) {
- throw new Error("Failed to set variable value");
- }
-};
-
-
/**
* Mirror object for scope of frame or function. Either frame or function must
* be specified.
@@ -1934,11 +1914,6 @@ ScopeMirror.prototype.scopeObject = function() {
};
-ScopeMirror.prototype.setVariableValue = function(name, new_value) {
- this.details_.setVariableValueImpl(name, new_value);
-};
-
-
/**
* Mirror object for script source.
* @param {Script} script The script object
diff --git a/deps/v8/src/object-observe.js b/deps/v8/src/object-observe.js
deleted file mode 100644
index 8c2895f4b..000000000
--- a/deps/v8/src/object-observe.js
+++ /dev/null
@@ -1,242 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var observationState = %GetObservationState();
-if (IS_UNDEFINED(observationState.observerInfoMap)) {
- observationState.observerInfoMap = %CreateObjectHashTable();
- observationState.objectInfoMap = %CreateObjectHashTable();
- observationState.notifierTargetMap = %CreateObjectHashTable();
- observationState.pendingObservers = new InternalArray;
- observationState.observerPriority = 0;
-}
-
-function InternalObjectHashTable(tableName) {
- this.tableName = tableName;
-}
-
-InternalObjectHashTable.prototype = {
- get: function(key) {
- return %ObjectHashTableGet(observationState[this.tableName], key);
- },
- set: function(key, value) {
- observationState[this.tableName] =
- %ObjectHashTableSet(observationState[this.tableName], key, value);
- },
- has: function(key) {
- return !IS_UNDEFINED(this.get(key));
- }
-};
-
-var observerInfoMap = new InternalObjectHashTable('observerInfoMap');
-var objectInfoMap = new InternalObjectHashTable('objectInfoMap');
-var notifierTargetMap = new InternalObjectHashTable('notifierTargetMap');
-
-function CreateObjectInfo(object) {
- var info = {
- changeObservers: new InternalArray,
- notifier: null,
- };
- objectInfoMap.set(object, info);
- return info;
-}
-
-function ObjectObserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["observe"]);
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["observe"]);
- if (ObjectIsFrozen(callback))
- throw MakeTypeError("observe_callback_frozen");
-
- if (!observerInfoMap.has(callback)) {
- observerInfoMap.set(callback, {
- pendingChangeRecords: null,
- priority: observationState.observerPriority++,
- });
- }
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo)) {
- objectInfo = CreateObjectInfo(object);
- %SetIsObserved(object, true);
- }
-
- var changeObservers = objectInfo.changeObservers;
- if (changeObservers.indexOf(callback) < 0)
- changeObservers.push(callback);
-
- return object;
-}
-
-function ObjectUnobserve(object, callback) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["unobserve"]);
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["unobserve"]);
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo))
- return object;
-
- var changeObservers = objectInfo.changeObservers;
- var index = changeObservers.indexOf(callback);
- if (index >= 0)
- changeObservers.splice(index, 1);
-
- return object;
-}
-
-function EnqueueChangeRecord(changeRecord, observers) {
- for (var i = 0; i < observers.length; i++) {
- var observer = observers[i];
- var observerInfo = observerInfoMap.get(observer);
- observationState.pendingObservers[observerInfo.priority] = observer;
- %SetObserverDeliveryPending();
- if (IS_NULL(observerInfo.pendingChangeRecords)) {
- observerInfo.pendingChangeRecords = new InternalArray(changeRecord);
- } else {
- observerInfo.pendingChangeRecords.push(changeRecord);
- }
- }
-}
-
-function NotifyChange(type, object, name, oldValue) {
- var objectInfo = objectInfoMap.get(object);
- var changeRecord = (arguments.length < 4) ?
- { type: type, object: object, name: name } :
- { type: type, object: object, name: name, oldValue: oldValue };
- ObjectFreeze(changeRecord);
- EnqueueChangeRecord(changeRecord, objectInfo.changeObservers);
-}
-
-var notifierPrototype = {};
-
-function ObjectNotifierNotify(changeRecord) {
- if (!IS_SPEC_OBJECT(this))
- throw MakeTypeError("called_on_non_object", ["notify"]);
-
- var target = notifierTargetMap.get(this);
- if (IS_UNDEFINED(target))
- throw MakeTypeError("observe_notify_non_notifier");
-
- if (!IS_STRING(changeRecord.type))
- throw MakeTypeError("observe_type_non_string");
-
- var objectInfo = objectInfoMap.get(target);
- if (IS_UNDEFINED(objectInfo))
- return;
-
- if (!objectInfo.changeObservers.length)
- return;
-
- var newRecord = {
- object: target
- };
- for (var prop in changeRecord) {
- if (prop === 'object')
- continue;
-
- %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
- READ_ONLY + DONT_DELETE);
- }
- ObjectFreeze(newRecord);
-
- EnqueueChangeRecord(newRecord, objectInfo.changeObservers);
-}
-
-function ObjectGetNotifier(object) {
- if (!IS_SPEC_OBJECT(object))
- throw MakeTypeError("observe_non_object", ["getNotifier"]);
-
- if (ObjectIsFrozen(object))
- return null;
-
- var objectInfo = objectInfoMap.get(object);
- if (IS_UNDEFINED(objectInfo))
- objectInfo = CreateObjectInfo(object);
-
- if (IS_NULL(objectInfo.notifier)) {
- objectInfo.notifier = {
- __proto__: notifierPrototype
- };
- notifierTargetMap.set(objectInfo.notifier, object);
- }
-
- return objectInfo.notifier;
-}
-
-function DeliverChangeRecordsForObserver(observer) {
- var observerInfo = observerInfoMap.get(observer);
- if (IS_UNDEFINED(observerInfo))
- return;
-
- var pendingChangeRecords = observerInfo.pendingChangeRecords;
- if (IS_NULL(pendingChangeRecords))
- return;
-
- observerInfo.pendingChangeRecords = null;
- delete observationState.pendingObservers[observerInfo.priority];
- var delivered = [];
- %MoveArrayContents(pendingChangeRecords, delivered);
- try {
- %Call(void 0, delivered, observer);
- } catch (ex) {}
-}
-
-function ObjectDeliverChangeRecords(callback) {
- if (!IS_SPEC_FUNCTION(callback))
- throw MakeTypeError("observe_non_function", ["deliverChangeRecords"]);
-
- DeliverChangeRecordsForObserver(callback);
-}
-
-function DeliverChangeRecords() {
- while (observationState.pendingObservers.length) {
- var pendingObservers = observationState.pendingObservers;
- observationState.pendingObservers = new InternalArray;
- for (var i in pendingObservers) {
- DeliverChangeRecordsForObserver(pendingObservers[i]);
- }
- }
-}
-
-function SetupObjectObserve() {
- %CheckIsBootstrapping();
- InstallFunctions($Object, DONT_ENUM, $Array(
- "deliverChangeRecords", ObjectDeliverChangeRecords,
- "getNotifier", ObjectGetNotifier,
- "observe", ObjectObserve,
- "unobserve", ObjectUnobserve
- ));
- InstallFunctions(notifierPrototype, DONT_ENUM, $Array(
- "notify", ObjectNotifierNotify
- ));
-}
-
-SetupObjectObserve();
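
Editor's note: the deleted object-observe.js drains its priority-indexed pendingObservers array in rounds, swapping the array out before delivery so callbacks can safely schedule new work. A loose C++ analogue of that delivery loop (the deleted code is JavaScript; this is a sketch of the pattern, not the V8 runtime):

    #include <functional>
    #include <map>

    // Observers keyed by registration priority, as in observerPriority.
    using Observer = std::function<void()>;
    static std::map<int, Observer> pending_observers;

    // Analogue of DeliverChangeRecords(): swap the pending set out first
    // so re-entrant scheduling lands in the next round of the loop.
    static void DeliverAll() {
      while (!pending_observers.empty()) {
        std::map<int, Observer> batch;
        batch.swap(pending_observers);
        for (auto& entry : batch) entry.second();  // priority order
      }
    }

    int main() {
      pending_observers[0] = [] { /* first observer */ };
      pending_observers[1] = [] { /* second observer */ };
      DeliverAll();
      return pending_observers.empty() ? 0 : 1;
    }
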
diff --git a/deps/v8/src/objects-debug.cc b/deps/v8/src/objects-debug.cc
index e3226c18b..be9659296 100644
--- a/deps/v8/src/objects-debug.cc
+++ b/deps/v8/src/objects-debug.cc
@@ -463,13 +463,13 @@ void String::StringVerify() {
ConsString::cast(this)->ConsStringVerify();
} else if (IsSlicedString()) {
SlicedString::cast(this)->SlicedStringVerify();
- } else if (IsSeqOneByteString()) {
- SeqOneByteString::cast(this)->SeqOneByteStringVerify();
+ } else if (IsSeqAsciiString()) {
+ SeqAsciiString::cast(this)->SeqAsciiStringVerify();
}
}
-void SeqOneByteString::SeqOneByteStringVerify() {
+void SeqAsciiString::SeqAsciiStringVerify() {
CHECK(String::IsAscii(GetChars(), length()));
}
@@ -499,8 +499,7 @@ void JSFunction::JSFunctionVerify() {
VerifyObjectField(kPrototypeOrInitialMapOffset);
VerifyObjectField(kNextFunctionLinkOffset);
CHECK(code()->IsCode());
- CHECK(next_function_link() == NULL ||
- next_function_link()->IsUndefined() ||
+ CHECK(next_function_link()->IsUndefined() ||
next_function_link()->IsJSFunction());
}
diff --git a/deps/v8/src/objects-inl.h b/deps/v8/src/objects-inl.h
index 1d9c87f0e..d2f996bae 100644
--- a/deps/v8/src/objects-inl.h
+++ b/deps/v8/src/objects-inl.h
@@ -134,14 +134,6 @@ bool Object::IsFixedArrayBase() {
}
-// External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() {
- return Object::IsHeapObject() &&
- HeapObject::cast(this)->map() ==
- HeapObject::cast(this)->GetHeap()->external_map();
-}
-
-
bool Object::IsInstanceOf(FunctionTemplateInfo* expected) {
// There is a constraint on the object; check.
if (!this->IsJSObject()) return false;
@@ -229,10 +221,10 @@ bool Object::IsSeqString() {
}
-bool Object::IsSeqOneByteString() {
+bool Object::IsSeqAsciiString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsSequential() &&
- String::cast(this)->IsOneByteRepresentation();
+ String::cast(this)->IsAsciiRepresentation();
}
@@ -252,7 +244,7 @@ bool Object::IsExternalString() {
bool Object::IsExternalAsciiString() {
if (!IsString()) return false;
return StringShape(String::cast(this)).IsExternal() &&
- String::cast(this)->IsOneByteRepresentation();
+ String::cast(this)->IsAsciiRepresentation();
}
@@ -295,9 +287,9 @@ bool StringShape::IsSymbol() {
}
-bool String::IsOneByteRepresentation() {
+bool String::IsAsciiRepresentation() {
uint32_t type = map()->instance_type();
- return (type & kStringEncodingMask) == kOneByteStringTag;
+ return (type & kStringEncodingMask) == kAsciiStringTag;
}
@@ -307,18 +299,18 @@ bool String::IsTwoByteRepresentation() {
}
-bool String::IsOneByteRepresentationUnderneath() {
+bool String::IsAsciiRepresentationUnderneath() {
uint32_t type = map()->instance_type();
STATIC_ASSERT(kIsIndirectStringTag != 0);
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
+ case kAsciiStringTag:
return true;
case kTwoByteStringTag:
return false;
default: // Cons or sliced string. Need to go deeper.
- return GetUnderlying()->IsOneByteRepresentation();
+ return GetUnderlying()->IsAsciiRepresentation();
}
}
@@ -329,7 +321,7 @@ bool String::IsTwoByteRepresentationUnderneath() {
STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
ASSERT(IsFlat());
switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
- case kOneByteStringTag:
+ case kAsciiStringTag:
return false;
case kTwoByteStringTag:
return true;
@@ -341,7 +333,8 @@ bool String::IsTwoByteRepresentationUnderneath() {
bool String::HasOnlyAsciiChars() {
uint32_t type = map()->instance_type();
- return (type & kAsciiDataHintMask) == kAsciiDataHintTag;
+ return (type & kStringEncodingMask) == kAsciiStringTag ||
+ (type & kAsciiDataHintMask) == kAsciiDataHintTag;
}
@@ -394,7 +387,7 @@ STATIC_CHECK(static_cast<uint32_t>(kStringEncodingMask) ==
bool StringShape::IsSequentialAscii() {
- return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
+ return full_representation_tag() == (kSeqStringTag | kAsciiStringTag);
}
@@ -404,14 +397,14 @@ bool StringShape::IsSequentialTwoByte() {
bool StringShape::IsExternalAscii() {
- return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
+ return full_representation_tag() == (kExternalStringTag | kAsciiStringTag);
}
-STATIC_CHECK((kExternalStringTag | kOneByteStringTag) ==
+STATIC_CHECK((kExternalStringTag | kAsciiStringTag) ==
Internals::kExternalAsciiRepresentationTag);
-STATIC_CHECK(v8::String::ASCII_ENCODING == kOneByteStringTag);
+STATIC_CHECK(v8::String::ASCII_ENCODING == kAsciiStringTag);
bool StringShape::IsExternalTwoByte() {
@@ -667,8 +660,8 @@ bool Object::IsDictionary() {
bool Object::IsSymbolTable() {
- return IsHashTable() &&
- this == HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
+ return IsHashTable() && this ==
+ HeapObject::cast(this)->GetHeap()->raw_unchecked_symbol_table();
}
@@ -725,11 +718,6 @@ bool Object::IsMapCache() {
}
-bool Object::IsObjectHashTable() {
- return IsHashTable();
-}
-
-
bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString();
}
@@ -1064,11 +1052,7 @@ Failure* Failure::Construct(Type type, intptr_t value) {
uintptr_t info =
(static_cast<uintptr_t>(value) << kFailureTypeTagSize) | type;
ASSERT(((info << kFailureTagSize) >> kFailureTagSize) == info);
- // Fill the unused bits with a pattern that's easy to recognize in crash
- // dumps.
- static const int kFailureMagicPattern = 0x0BAD0000;
- return reinterpret_cast<Failure*>(
- (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern);
+ return reinterpret_cast<Failure*>((info << kFailureTagSize) | kFailureTag);
}
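
Editor's note: the dropped kFailureMagicPattern illustrates a common debugging trick, namely filling the spare bits of an encoded pointer with a recognizable constant so failures stand out in crash dumps. A sketch of the encoding (tag sizes here are illustrative, the magic pattern matches the removed code):

    #include <cstdint>
    #include <cstdio>

    // Failure-style pointer encoding: low bits carry a tag, the payload is
    // shifted above them, and unused bits get an easy-to-spot pattern.
    static const uintptr_t kFailureTagSize = 2;
    static const uintptr_t kFailureTag = 3;
    static const uintptr_t kFailureMagicPattern = 0x0BAD0000;

    static uintptr_t EncodeFailure(uintptr_t info) {
      return (info << kFailureTagSize) | kFailureTag | kFailureMagicPattern;
    }

    int main() {
      // 0x0BADxxxx in a register dump immediately reads as a Failure.
      std::printf("%#zx\n", static_cast<size_t>(EncodeFailure(0x12)));
      return 0;
    }
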
@@ -1866,7 +1850,7 @@ void FixedArray::set(int index,
void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
int offset = kHeaderSize + index * kPointerSize;
WRITE_FIELD(array, offset, value);
@@ -1880,7 +1864,7 @@ void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
void FixedArray::NoWriteBarrierSet(FixedArray* array,
int index,
Object* value) {
- ASSERT(array->map() != HEAP->fixed_cow_array_map());
+ ASSERT(array->map() != HEAP->raw_unchecked_fixed_cow_array_map());
ASSERT(index >= 0 && index < array->length());
ASSERT(!HEAP->InNewSpace(value));
WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
@@ -1947,11 +1931,6 @@ void FixedArray::set_null_unchecked(Heap* heap, int index) {
}
-double* FixedDoubleArray::data_start() {
- return reinterpret_cast<double*>(FIELD_ADDR(this, kHeaderSize));
-}
-
-
Object** FixedArray::data_start() {
return HeapObject::RawField(this, kHeaderSize);
}
@@ -2311,8 +2290,7 @@ int HashTable<Shape, Key>::FindEntry(Isolate* isolate, Key key) {
// EnsureCapacity will guarantee the hash table is never full.
while (true) {
Object* element = KeyAt(entry);
- // Empty entry. Uses raw unchecked accessors because it is called by the
- // symbol table during bootstrapping.
+ // Empty entry.
if (element == isolate->heap()->raw_unchecked_undefined_value()) break;
if (element != isolate->heap()->raw_unchecked_the_hole_value() &&
Shape::IsMatch(key, element)) return entry;
@@ -2362,7 +2340,7 @@ CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqAsciiString)
CAST_ACCESSOR(SeqTwoByteString)
CAST_ACCESSOR(SlicedString)
CAST_ACCESSOR(ConsString)
@@ -2466,18 +2444,18 @@ String* String::TryFlattenGetString(PretenureFlag pretenure) {
uint16_t String::Get(int index) {
ASSERT(index >= 0 && index < length());
switch (StringShape(this).full_representation_tag()) {
- case kSeqStringTag | kOneByteStringTag:
- return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
+ case kSeqStringTag | kAsciiStringTag:
+ return SeqAsciiString::cast(this)->SeqAsciiStringGet(index);
case kSeqStringTag | kTwoByteStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGet(index);
- case kConsStringTag | kOneByteStringTag:
+ case kConsStringTag | kAsciiStringTag:
case kConsStringTag | kTwoByteStringTag:
return ConsString::cast(this)->ConsStringGet(index);
- case kExternalStringTag | kOneByteStringTag:
+ case kExternalStringTag | kAsciiStringTag:
return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
case kExternalStringTag | kTwoByteStringTag:
return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
- case kSlicedStringTag | kOneByteStringTag:
+ case kSlicedStringTag | kAsciiStringTag:
case kSlicedStringTag | kTwoByteStringTag:
return SlicedString::cast(this)->SlicedStringGet(index);
default:
@@ -2493,8 +2471,8 @@ void String::Set(int index, uint16_t value) {
ASSERT(index >= 0 && index < length());
ASSERT(StringShape(this).IsSequential());
- return this->IsOneByteRepresentation()
- ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
+ return this->IsAsciiRepresentation()
+ ? SeqAsciiString::cast(this)->SeqAsciiStringSet(index, value)
: SeqTwoByteString::cast(this)->SeqTwoByteStringSet(index, value);
}
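
Editor's note: String::Get above dispatches on full_representation_tag(), the OR of a representation tag (seq, cons, external, sliced) and an encoding tag (ASCII or two-byte), so one switch covers every concrete string class. A reduced model of that bit layout (tag values are illustrative, not V8's exact constants):

    // Two small orthogonal tags packed into one instance-type word, as in
    // V8's kStringRepresentationMask / kStringEncodingMask split.
    enum Representation { kSeq = 0x0, kCons = 0x1, kExt = 0x2, kSliced = 0x3 };
    enum Encoding { kAscii = 0x0, kTwoByte = 0x4 };

    static int FullTag(Representation r, Encoding e) { return r | e; }

    int main() {
      int tag = FullTag(kCons, kTwoByte);
      switch (tag) {
        case kCons | kAscii:
        case kCons | kTwoByte:
          return 0;  // both cons encodings share a handler, as in String::Get
        default:
          return 1;
      }
    }
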
@@ -2516,96 +2494,25 @@ String* String::GetUnderlying() {
}
-template<class Visitor, class ConsOp>
-void String::Visit(
- String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& consOp,
- int32_t type,
- unsigned length) {
-
- ASSERT(length == static_cast<unsigned>(string->length()));
- ASSERT(offset <= length);
-
- unsigned sliceOffset = offset;
- while (true) {
- ASSERT(type == string->map()->instance_type());
-
- switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
- case kSeqStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
- reinterpret_cast<const uint8_t*>(
- SeqOneByteString::cast(string)->GetChars()) + sliceOffset,
- length - offset);
- return;
-
- case kSeqStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
- reinterpret_cast<const uint16_t*>(
- SeqTwoByteString::cast(string)->GetChars()) + sliceOffset,
- length - offset);
- return;
-
- case kExternalStringTag | kOneByteStringTag:
- visitor.VisitOneByteString(
- reinterpret_cast<const uint8_t*>(
- ExternalAsciiString::cast(string)->GetChars()) + sliceOffset,
- length - offset);
- return;
-
- case kExternalStringTag | kTwoByteStringTag:
- visitor.VisitTwoByteString(
- reinterpret_cast<const uint16_t*>(
- ExternalTwoByteString::cast(string)->GetChars()) + sliceOffset,
- length - offset);
- return;
-
- case kSlicedStringTag | kOneByteStringTag:
- case kSlicedStringTag | kTwoByteStringTag: {
- SlicedString* slicedString = SlicedString::cast(string);
- sliceOffset += slicedString->offset();
- string = slicedString->parent();
- type = string->map()->instance_type();
- continue;
- }
-
- case kConsStringTag | kOneByteStringTag:
- case kConsStringTag | kTwoByteStringTag:
- string = consOp.Operate(ConsString::cast(string), &offset, &type,
- &length);
- if (string == NULL) return;
- sliceOffset = offset;
- ASSERT(length == static_cast<unsigned>(string->length()));
- continue;
-
- default:
- UNREACHABLE();
- return;
- }
- }
-}
-
-
-uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
+uint16_t SeqAsciiString::SeqAsciiStringGet(int index) {
ASSERT(index >= 0 && index < length());
return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
}
-void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
+void SeqAsciiString::SeqAsciiStringSet(int index, uint16_t value) {
ASSERT(index >= 0 && index < length() && value <= kMaxAsciiCharCode);
WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
static_cast<byte>(value));
}
-Address SeqOneByteString::GetCharsAddress() {
+Address SeqAsciiString::GetCharsAddress() {
return FIELD_ADDR(this, kHeaderSize);
}
-char* SeqOneByteString::GetChars() {
+char* SeqAsciiString::GetChars() {
return reinterpret_cast<char*>(GetCharsAddress());
}
@@ -2637,7 +2544,7 @@ int SeqTwoByteString::SeqTwoByteStringSize(InstanceType instance_type) {
}
-int SeqOneByteString::SeqOneByteStringSize(InstanceType instance_type) {
+int SeqAsciiString::SeqAsciiStringSize(InstanceType instance_type) {
return SizeFor(length());
}
@@ -2765,146 +2672,6 @@ const uint16_t* ExternalTwoByteString::ExternalTwoByteStringGetData(
}
-unsigned ConsStringIteratorOp::OffsetForDepth(unsigned depth) {
- return depth & kDepthMask;
-}
-
-
-uint32_t ConsStringIteratorOp::MaskForDepth(unsigned depth) {
- return 1 << OffsetForDepth(depth);
-}
-
-
-void ConsStringIteratorOp::SetRightDescent() {
- trace_ |= MaskForDepth(depth_ - 1);
-}
-
-
-void ConsStringIteratorOp::ClearRightDescent() {
- trace_ &= ~MaskForDepth(depth_ - 1);
-}
-
-
-void ConsStringIteratorOp::PushLeft(ConsString* string) {
- frames_[depth_++ & kDepthMask] = string;
-}
-
-
-void ConsStringIteratorOp::PushRight(ConsString* string, int32_t type) {
- // Inplace update
- frames_[(depth_-1) & kDepthMask] = string;
- if (depth_ != 1) return;
- // Optimization: can replace root in this case.
- root_ = string;
- root_type_ = type;
- root_length_ = string->length();
-}
-
-
-void ConsStringIteratorOp::AdjustMaximumDepth() {
- if (depth_ > maximum_depth_) maximum_depth_ = depth_;
-}
-
-
-void ConsStringIteratorOp::Pop() {
- ASSERT(depth_ > 0);
- ASSERT(depth_ <= maximum_depth_);
- depth_--;
-}
-
-
-void ConsStringIteratorOp::Reset() {
- consumed_ = 0;
- ResetStack();
-}
-
-
-bool ConsStringIteratorOp::HasMore() {
- return depth_ != 0;
-}
-
-
-void ConsStringIteratorOp::ResetStack() {
- depth_ = 0;
- maximum_depth_ = 0;
-}
-
-
-bool ConsStringIteratorOp::ContinueOperation(ContinueResponse* response) {
- bool blewStack;
- int32_t type;
- String* string = NextLeaf(&blewStack, &type);
- // String found.
- if (string != NULL) {
- unsigned length = string->length();
- consumed_ += length;
- response->string_ = string;
- response->offset_ = 0;
- response->length_ = length;
- response->type_ = type;
- return true;
- }
- // Traversal complete.
- if (!blewStack) return false;
- // Restart search.
- ResetStack();
- response->string_ = root_;
- response->offset_ = consumed_;
- response->length_ = root_length_;
- response->type_ = root_type_;
- return true;
-}
-
-
-uint16_t StringCharacterStream::GetNext() {
- ASSERT(buffer8_ != NULL);
- return is_one_byte_ ? *buffer8_++ : *buffer16_++;
-}
-
-
-StringCharacterStream::StringCharacterStream(
- String* string, unsigned offset, ConsStringIteratorOp* op)
- : is_one_byte_(true),
- buffer8_(NULL),
- end_(NULL),
- op_(op) {
- op->Reset();
- String::Visit(string,
- offset, *this, *op, string->map()->instance_type(), string->length());
-}
-
-
-bool StringCharacterStream::HasMore() {
- if (buffer8_ != end_) return true;
- if (!op_->HasMore()) return false;
- ConsStringIteratorOp::ContinueResponse response;
- // This has been checked above
- if (!op_->ContinueOperation(&response)) {
- UNREACHABLE();
- return false;
- }
- String::Visit(response.string_,
- response.offset_, *this, *op_, response.type_, response.length_);
- return true;
-}
-
-
-void StringCharacterStream::VisitOneByteString(
- const uint8_t* chars, unsigned length) {
- is_one_byte_ = true;
- buffer8_ = chars;
- end_ = chars + length;
-}
-
-
-void StringCharacterStream::VisitTwoByteString(
- const uint16_t* chars, unsigned length) {
- is_one_byte_ = false;
- buffer16_ = chars;
- end_ = reinterpret_cast<const uint8_t*>(chars + length);
-}
-
-
void JSFunctionResultCache::MakeZeroSize() {
set_finger_index(kEntriesIndex);
set_size(kEntriesIndex);
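
Editor's note: the ConsStringIteratorOp/StringCharacterStream machinery removed above walks a cons-string tree iteratively with a small fixed-depth frame stack (bounded by kDepthMask) instead of recursing. A simplified iterative traversal over a toy cons node, not the V8 types:

    #include <cassert>
    #include <string>
    #include <vector>

    // Toy cons string: either a leaf with characters or a pair of children.
    struct Node {
      std::string leaf;  // used when first == nullptr
      const Node* first;
      const Node* second;
    };

    // Left-to-right traversal with an explicit stack, echoing the deleted
    // ConsStringIteratorOp, which kept its frames in a fixed ring buffer.
    static std::string Flatten(const Node* root) {
      std::string out;
      std::vector<const Node*> stack(1, root);
      while (!stack.empty()) {
        const Node* n = stack.back();
        stack.pop_back();
        if (n->first == nullptr) {
          out += n->leaf;
        } else {
          stack.push_back(n->second);  // pushed first so `first` pops first
          stack.push_back(n->first);
        }
      }
      return out;
    }

    int main() {
      Node a = {"foo", nullptr, nullptr};
      Node b = {"bar", nullptr, nullptr};
      Node root = {"", &a, &b};
      assert(Flatten(&root) == "foobar");
      return 0;
    }
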
@@ -3195,8 +2962,8 @@ int HeapObject::SizeFromMap(Map* map) {
return FixedArray::BodyDescriptor::SizeOf(map, this);
}
if (instance_type == ASCII_STRING_TYPE) {
- return SeqOneByteString::SizeFor(
- reinterpret_cast<SeqOneByteString*>(this)->length());
+ return SeqAsciiString::SizeFor(
+ reinterpret_cast<SeqAsciiString*>(this)->length());
}
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
@@ -3383,16 +3150,6 @@ bool Map::owns_descriptors() {
}
-void Map::set_is_observed(bool is_observed) {
- set_bit_field3(IsObserved::update(bit_field3(), is_observed));
-}
-
-
-bool Map::is_observed() {
- return IsObserved::decode(bit_field3());
-}
-
-
void Code::set_flags(Code::Flags flags) {
STATIC_ASSERT(Code::NUMBER_OF_KINDS <= KindField::kMax + 1);
// Make sure that all call stubs have an arguments count.
@@ -3635,6 +3392,66 @@ void Code::set_unary_op_type(byte value) {
}
+byte Code::binary_op_type() {
+ ASSERT(is_binary_op_stub());
+ return BinaryOpTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_binary_op_type(byte value) {
+ ASSERT(is_binary_op_stub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
+byte Code::binary_op_result_type() {
+ ASSERT(is_binary_op_stub());
+ return BinaryOpResultTypeField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_binary_op_result_type(byte value) {
+ ASSERT(is_binary_op_stub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = BinaryOpResultTypeField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
+byte Code::compare_state() {
+ ASSERT(is_compare_ic_stub());
+ return CompareStateField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_compare_state(byte value) {
+ ASSERT(is_compare_ic_stub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareStateField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
+byte Code::compare_operation() {
+ ASSERT(is_compare_ic_stub());
+ return CompareOperationField::decode(
+ READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
+}
+
+
+void Code::set_compare_operation(byte value) {
+ ASSERT(is_compare_ic_stub());
+ int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+ int updated = CompareOperationField::update(previous, value);
+ WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
+}
+
+
byte Code::to_boolean_state() {
ASSERT(is_to_boolean_ic_stub());
return ToBooleanStateField::decode(
@@ -4308,10 +4125,11 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
void SharedFunctionInfo::BeforeVisitingPointers() {
if (IsInobjectSlackTrackingInProgress()) DetachInitialMap();
-}
-
-void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ // Flush optimized code map on major GC.
+ // Note: we may experiment with rebuilding it or retaining entries
+ // which should survive as we iterate through optimized functions
+ // anyway.
set_optimized_code_map(Smi::FromInt(0));
}
@@ -4326,7 +4144,7 @@ bool Script::HasValidSource() {
if (!src->IsString()) return true;
String* src_str = String::cast(src);
if (!StringShape(src_str).IsExternal()) return true;
- if (src_str->IsOneByteRepresentation()) {
+ if (src_str->IsAsciiRepresentation()) {
return ExternalAsciiString::cast(src)->resource() != NULL;
} else if (src_str->IsTwoByteRepresentation()) {
return ExternalTwoByteString::cast(src)->resource() != NULL;
@@ -4368,19 +4186,6 @@ void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
}
-void SharedFunctionInfo::ReplaceCode(Code* value) {
- // If the GC metadata field is already used then the function was
- // enqueued as a code flushing candidate and we remove it now.
- if (code()->gc_metadata() != NULL) {
- CodeFlusher* flusher = GetHeap()->mark_compact_collector()->code_flusher();
- flusher->EvictCandidate(this);
- }
-
- ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
- set_code(value);
-}
-
-
ScopeInfo* SharedFunctionInfo::scope_info() {
return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
}
@@ -4607,6 +4412,42 @@ void JSFunction::set_initial_map(Map* value) {
}
+MaybeObject* JSFunction::set_initial_map_and_cache_transitions(
+ Map* initial_map) {
+ Context* native_context = context()->native_context();
+ Object* array_function =
+ native_context->get(Context::ARRAY_FUNCTION_INDEX);
+ if (array_function->IsJSFunction() &&
+ this == JSFunction::cast(array_function)) {
+ // Replace all of the cached initial array maps in the native context with
+ // the appropriate transitioned elements kind maps.
+ Heap* heap = GetHeap();
+ MaybeObject* maybe_maps =
+ heap->AllocateFixedArrayWithHoles(kElementsKindCount);
+ FixedArray* maps;
+ if (!maybe_maps->To(&maps)) return maybe_maps;
+
+ Map* current_map = initial_map;
+ ElementsKind kind = current_map->elements_kind();
+ ASSERT(kind == GetInitialFastElementsKind());
+ maps->set(kind, current_map);
+ for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
+ i < kFastElementsKindCount; ++i) {
+ Map* new_map;
+ ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
+ MaybeObject* maybe_new_map =
+ current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
+ maps->set(next_kind, new_map);
+ current_map = new_map;
+ }
+ native_context->set_js_array_maps(maps);
+ }
+ set_initial_map(initial_map);
+ return this;
+}
+
+
bool JSFunction::has_initial_map() {
return prototype_or_initial_map()->IsMap();
}
@@ -4798,49 +4639,13 @@ JSMessageObject* JSMessageObject::cast(Object* obj) {
INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
-INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
ACCESSORS(Code, handler_table, FixedArray, kHandlerTableOffset)
ACCESSORS(Code, deoptimization_data, FixedArray, kDeoptimizationDataOffset)
-
-
-// Type feedback slot: type_feedback_info for FUNCTIONs, stub_info for STUBs.
-void Code::InitializeTypeFeedbackInfoNoWriteBarrier(Object* value) {
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
-}
-
-
-Object* Code::type_feedback_info() {
- ASSERT(kind() == FUNCTION);
- return Object::cast(READ_FIELD(this, kTypeFeedbackInfoOffset));
-}
-
-
-void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
- ASSERT(kind() == FUNCTION);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, value);
- CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
- value, mode);
-}
-
-
-int Code::stub_info() {
- ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
- Object* value = READ_FIELD(this, kTypeFeedbackInfoOffset);
- return Smi::cast(value)->value();
-}
-
-
-void Code::set_stub_info(int value) {
- ASSERT(kind() == COMPARE_IC || kind() == BINARY_OP_IC);
- WRITE_FIELD(this, kTypeFeedbackInfoOffset, Smi::FromInt(value));
-}
-
-
+ACCESSORS(Code, type_feedback_info, Object, kTypeFeedbackInfoOffset)
ACCESSORS(Code, gc_metadata, Object, kGCMetadataOffset)
INT_ACCESSORS(Code, ic_age, kICAgeOffset)
-
byte* Code::instruction_start() {
return FIELD_ADDR(this, kHeaderSize);
}
@@ -5021,11 +4826,6 @@ bool JSObject::HasFastHoleyElements() {
}
-bool JSObject::HasFastElements() {
- return IsFastElementsKind(GetElementsKind());
-}
-
-
bool JSObject::HasDictionaryElements() {
return GetElementsKind() == DICTIONARY_ELEMENTS;
}
@@ -5261,23 +5061,9 @@ bool JSReceiver::HasLocalProperty(String* name) {
PropertyAttributes JSReceiver::GetPropertyAttribute(String* key) {
- uint32_t index;
- if (IsJSObject() && key->AsArrayIndex(&index)) {
- return GetElementAttribute(index);
- }
return GetPropertyAttributeWithReceiver(this, key);
}
-
-PropertyAttributes JSReceiver::GetElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true);
-}
-
-
// TODO(504): this may be useful in other places too where JSGlobalProxy
// is used.
Object* JSObject::BypassGlobalProxy() {
@@ -5302,26 +5088,7 @@ bool JSReceiver::HasElement(uint32_t index) {
if (IsJSProxy()) {
return JSProxy::cast(this)->HasElementWithHandler(index);
}
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, true) != ABSENT;
-}
-
-
-bool JSReceiver::HasLocalElement(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->HasElementWithHandler(index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false) != ABSENT;
-}
-
-
-PropertyAttributes JSReceiver::GetLocalElementAttribute(uint32_t index) {
- if (IsJSProxy()) {
- return JSProxy::cast(this)->GetElementAttributeWithHandler(this, index);
- }
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- this, index, false);
+ return JSObject::cast(this)->HasElementWithReceiver(this, index);
}
@@ -5485,8 +5252,8 @@ void Map::ClearCodeCache(Heap* heap) {
// Please note this function is used during marking:
// - MarkCompactCollector::MarkUnmarkedObject
// - IncrementalMarking::Step
- ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
- WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
+ ASSERT(!heap->InNewSpace(heap->raw_unchecked_empty_fixed_array()));
+ WRITE_FIELD(this, kCodeCacheOffset, heap->raw_unchecked_empty_fixed_array());
}
@@ -5580,7 +5347,7 @@ Handle<Object> TypeFeedbackCells::MegamorphicSentinel(Isolate* isolate) {
Object* TypeFeedbackCells::RawUninitializedSentinel(Heap* heap) {
- return heap->the_hole_value();
+ return heap->raw_unchecked_the_hole_value();
}
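
Editor's note: several accessors restored in this file (binary_op_type, compare_state, and friends) share one 32-bit kind-specific-flags word, decoded and updated through V8's BitField template. A freestanding sketch of that pattern (field positions here are illustrative, not V8's real layout):

    #include <cassert>
    #include <cstdint>

    // Minimal BitField<T, position, size> in the style of V8's utils.h:
    // encode() shifts a value into place, decode() extracts it, and
    // update() rewrites one field while preserving the rest of the word.
    template <typename T, int position, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1u) << position;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << position;
      }
      static T decode(uint32_t word) {
        return static_cast<T>((word & kMask) >> position);
      }
      static uint32_t update(uint32_t previous, T value) {
        return (previous & ~kMask) | encode(value);
      }
    };

    using BinaryOpTypeField = BitField<uint8_t, 3, 4>;  // illustrative layout
    using CompareStateField = BitField<uint8_t, 7, 4>;

    int main() {
      uint32_t flags = 0;
      flags = BinaryOpTypeField::update(flags, 5);
      flags = CompareStateField::update(flags, 9);
      assert(BinaryOpTypeField::decode(flags) == 5);  // fields independent
      assert(CompareStateField::decode(flags) == 9);
      return 0;
    }
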
diff --git a/deps/v8/src/objects-printer.cc b/deps/v8/src/objects-printer.cc
index 9a1a58ef8..b1118de9c 100644
--- a/deps/v8/src/objects-printer.cc
+++ b/deps/v8/src/objects-printer.cc
@@ -384,7 +384,7 @@ void JSObject::PrintElements(FILE* out) {
case EXTERNAL_DOUBLE_ELEMENTS: {
ExternalDoubleArray* p = ExternalDoubleArray::cast(elements());
for (int i = 0; i < p->length(); i++) {
- PrintF(out, " %d: %f\n", i, p->get_scalar(i));
+ PrintF(out, " %d: %f\n", i, p->get_scalar(i));
}
break;
}
@@ -393,16 +393,11 @@ void JSObject::PrintElements(FILE* out) {
break;
case NON_STRICT_ARGUMENTS_ELEMENTS: {
FixedArray* p = FixedArray::cast(elements());
- PrintF(out, " parameter map:");
for (int i = 2; i < p->length(); i++) {
- PrintF(out, " %d:", i - 2);
+ PrintF(out, " %d: ", i);
p->get(i)->ShortPrint(out);
+ PrintF(out, "\n");
}
- PrintF(out, "\n context: ");
- p->get(0)->ShortPrint(out);
- PrintF(out, "\n arguments: ");
- p->get(1)->ShortPrint(out);
- PrintF(out, "\n");
break;
}
}
diff --git a/deps/v8/src/objects-visiting-inl.h b/deps/v8/src/objects-visiting-inl.h
index 925b2562f..d698a8df0 100644
--- a/deps/v8/src/objects-visiting-inl.h
+++ b/deps/v8/src/objects-visiting-inl.h
@@ -68,7 +68,7 @@ void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
SharedFunctionInfo::BodyDescriptor,
int>::Visit);
- table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
+ table_.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
@@ -110,7 +110,10 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
SlicedString::BodyDescriptor,
void>::Visit);
- table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
+ table_.Register(kVisitFixedArray,
+ &FlexibleBodyVisitor<StaticVisitor,
+ FixedArray::BodyDescriptor,
+ void>::Visit);
table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
@@ -120,7 +123,7 @@ void StaticMarkingVisitor<StaticVisitor>::Initialize() {
table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
- table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
+ table_.Register(kVisitSeqAsciiString, &DataObjectVisitor::Visit);
table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
@@ -222,17 +225,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
- Heap* heap, RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Code* target = rinfo->code_age_stub();
- ASSERT(target != NULL);
- heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
- StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
Map* map, HeapObject* object) {
FixedBodyVisitor<StaticVisitor,
@@ -284,9 +276,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCode(
if (FLAG_cleanup_code_caches_at_gc) {
code->ClearTypeFeedbackCells(heap);
}
- if (FLAG_age_code && !Serializer::enabled()) {
- code->MakeOlder(heap->mark_compact_collector()->marking_parity());
- }
code->CodeIterateBody<StaticVisitor>(heap);
}
@@ -299,13 +288,6 @@ void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
if (shared->ic_age() != heap->global_ic_age()) {
shared->ResetForNewContext(heap->global_ic_age());
}
- if (FLAG_cache_optimized_code) {
- // Flush optimized code map on major GC.
- // TODO(mstarzinger): We may experiment with rebuilding it or with
- // retaining entries which should survive as we iterate through
- // optimized functions anyway.
- shared->ClearOptimizedCodeMap();
- }
MarkCompactCollector* collector = heap->mark_compact_collector();
if (collector->is_code_flushing_enabled()) {
if (IsFlushable(heap, shared)) {
@@ -467,10 +449,8 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
// by optimized version of function.
MarkBit code_mark = Marking::MarkBitFrom(function->code());
if (code_mark.Get()) {
- if (!FLAG_age_code) {
- if (!Marking::MarkBitFrom(shared_info).Get()) {
- shared_info->set_code_age(0);
- }
+ if (!Marking::MarkBitFrom(shared_info).Get()) {
+ shared_info->set_code_age(0);
}
return false;
}
@@ -480,16 +460,11 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- // We do not (yet) flush code for optimized functions.
+ // We do not flush code for optimized functions.
if (function->code() != shared_info->code()) {
return false;
}
- // Check age of optimized code.
- if (FLAG_age_code && !function->code()->IsOld()) {
- return false;
- }
-
return IsFlushable(heap, shared_info);
}
@@ -531,20 +506,20 @@ bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
return false;
}
- if (FLAG_age_code) {
- return shared_info->code()->IsOld();
- } else {
-  // How many collections a newly compiled code object will survive before
-  // being flushed.
- static const int kCodeAgeThreshold = 5;
-
- // Age this shared function info.
- if (shared_info->code_age() < kCodeAgeThreshold) {
- shared_info->set_code_age(shared_info->code_age() + 1);
- return false;
- }
- return true;
+  // TODO(mstarzinger): The following will soon be replaced by a new way of
+  // aging code that is based on an aging stub in the function prologue.
+
+  // How many collections a newly compiled code object will survive before
+  // being flushed.
+ static const int kCodeAgeThreshold = 5;
+
+ // Age this shared function info.
+ if (shared_info->code_age() < kCodeAgeThreshold) {
+ shared_info->set_code_age(shared_info->code_age() + 1);
+ return false;
}
+
+ return true;
}
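
The hunks above drop the prologue-based code aging (FLAG_age_code, Code::MakeOlder, Code::IsOld) and fall back to a per-function GC counter. As a minimal standalone model of that counter heuristic, using a hypothetical FunctionInfo struct in place of v8's SharedFunctionInfo machinery:

#include <iostream>

// Hypothetical stand-in for SharedFunctionInfo's code_age field.
struct FunctionInfo {
  int code_age = 0;  // full GCs survived since the function last ran
};

// Collections a newly compiled code object survives before it becomes
// a flush candidate (mirrors kCodeAgeThreshold in the hunk above).
constexpr int kCodeAgeThreshold = 5;

// Called once per mark-compact collection for a candidate function.
bool IsFlushable(FunctionInfo& info) {
  if (info.code_age < kCodeAgeThreshold) {
    ++info.code_age;  // not old enough yet; age it and keep the code
    return false;
  }
  return true;  // survived enough GCs; safe to flush to the lazy stub
}

int main() {
  FunctionInfo f;
  for (int gc = 1; gc <= 6; ++gc) {
    std::cout << "GC " << gc << ": flush=" << IsFlushable(f) << "\n";
  }
  // Prints flush=0 for GCs 1..5 and flush=1 on GC 6.
  return 0;
}
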
diff --git a/deps/v8/src/objects-visiting.cc b/deps/v8/src/objects-visiting.cc
index 7082e59f3..a2dc43e24 100644
--- a/deps/v8/src/objects-visiting.cc
+++ b/deps/v8/src/objects-visiting.cc
@@ -45,8 +45,8 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
if (instance_type < FIRST_NONSTRING_TYPE) {
switch (instance_type & kStringRepresentationMask) {
case kSeqStringTag:
- if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
- return kVisitSeqOneByteString;
+ if ((instance_type & kStringEncodingMask) == kAsciiStringTag) {
+ return kVisitSeqAsciiString;
} else {
return kVisitSeqTwoByteString;
}
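
GetVisitorId above selects a visitor by masking bits of the string's instance type: the representation bits distinguish sequential, cons, external, and sliced strings, while a separate encoding bit distinguishes ASCII from two-byte. A self-contained sketch of that dispatch, with illustrative mask values (v8's real constants are defined in objects.h):

#include <cstdint>
#include <cstdio>

// Illustrative tag layout; v8's actual values live in objects.h.
constexpr uint32_t kStringRepresentationMask = 0x3;  // low two bits
constexpr uint32_t kSeqStringTag             = 0x0;
constexpr uint32_t kStringEncodingMask       = 0x4;  // encoding bit
constexpr uint32_t kAsciiStringTag           = 0x4;

enum VisitorId {
  kVisitSeqAsciiString,
  kVisitSeqTwoByteString,
  kVisitOther
};

VisitorId GetVisitorId(uint32_t instance_type) {
  if ((instance_type & kStringRepresentationMask) == kSeqStringTag) {
    // Sequential string: the encoding bit picks between the two
    // specialized visitors, exactly as in the hunk above.
    return (instance_type & kStringEncodingMask) == kAsciiStringTag
               ? kVisitSeqAsciiString
               : kVisitSeqTwoByteString;
  }
  return kVisitOther;
}

int main() {
  std::printf("%d\n", GetVisitorId(kSeqStringTag | kAsciiStringTag));  // 0
  std::printf("%d\n", GetVisitorId(kSeqStringTag));                    // 1
  return 0;
}
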
diff --git a/deps/v8/src/objects-visiting.h b/deps/v8/src/objects-visiting.h
index 29f3cbc59..26d1b121d 100644
--- a/deps/v8/src/objects-visiting.h
+++ b/deps/v8/src/objects-visiting.h
@@ -47,7 +47,7 @@ namespace internal {
class StaticVisitorBase : public AllStatic {
public:
#define VISITOR_ID_LIST(V) \
- V(SeqOneByteString) \
+ V(SeqAsciiString) \
V(SeqTwoByteString) \
V(ShortcutCandidate) \
V(ByteArray) \
@@ -318,9 +318,9 @@ class StaticNewSpaceVisitor : public StaticVisitorBase {
return JSObjectVisitor::Visit(map, object);
}
- static inline int VisitSeqOneByteString(Map* map, HeapObject* object) {
- return SeqOneByteString::cast(object)->
- SeqOneByteStringSize(map->instance_type());
+ static inline int VisitSeqAsciiString(Map* map, HeapObject* object) {
+ return SeqAsciiString::cast(object)->
+ SeqAsciiStringSize(map->instance_type());
}
static inline int VisitSeqTwoByteString(Map* map, HeapObject* object) {
@@ -391,11 +391,13 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitGlobalPropertyCell(Heap* heap, RelocInfo* rinfo);
static inline void VisitDebugTarget(Heap* heap, RelocInfo* rinfo);
static inline void VisitCodeTarget(Heap* heap, RelocInfo* rinfo);
- static inline void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo);
static inline void VisitExternalReference(RelocInfo* rinfo) { }
static inline void VisitRuntimeEntry(RelocInfo* rinfo) { }
// TODO(mstarzinger): This should be made protected once refactoring is done.
+ static inline void VisitNativeContext(Map* map, HeapObject* object);
+
+ // TODO(mstarzinger): This should be made protected once refactoring is done.
  // Mark non-optimized code for functions inlined into the given optimized
// code. This will prevent it from being flushed.
static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
@@ -406,7 +408,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
static inline void VisitSharedFunctionInfo(Map* map, HeapObject* object);
static inline void VisitJSFunction(Map* map, HeapObject* object);
static inline void VisitJSRegExp(Map* map, HeapObject* object);
- static inline void VisitNativeContext(Map* map, HeapObject* object);
// Mark pointers in a Map and its TransitionArray together, possibly
// treating transitions or back pointers weak.
@@ -435,10 +436,6 @@ class StaticMarkingVisitor : public StaticVisitorBase {
};
typedef FlexibleBodyVisitor<StaticVisitor,
- FixedArray::BodyDescriptor,
- void> FixedArrayVisitor;
-
- typedef FlexibleBodyVisitor<StaticVisitor,
JSObject::BodyDescriptor,
void> JSObjectVisitor;
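
The typedef shuffle above only changes how the kVisitFixedArray callback is spelled; both forms register a FlexibleBodyVisitor instantiation into a table indexed by visitor id. A simplified sketch of that registration pattern, loosely modeled on v8's dispatch tables (the names here are illustrative, not the real declarations):

#include <cstdio>

struct HeapObject { int body_size; };

enum VisitorId { kVisitSeqAsciiString, kVisitFixedArray, kVisitorIdCount };

using Callback = int (*)(HeapObject*);

// Simplified dispatch table: one callback slot per visitor id.
struct VisitorDispatchTable {
  Callback callbacks_[kVisitorIdCount] = {};
  void Register(VisitorId id, Callback cb) { callbacks_[id] = cb; }
  int Visit(VisitorId id, HeapObject* obj) { return callbacks_[id](obj); }
};

static int VisitSeqAsciiString(HeapObject* obj) { return obj->body_size; }
static int VisitFixedArray(HeapObject* obj) { return obj->body_size * 2; }

int main() {
  VisitorDispatchTable table;
  table.Register(kVisitSeqAsciiString, &VisitSeqAsciiString);
  table.Register(kVisitFixedArray, &VisitFixedArray);
  HeapObject obj{8};
  std::printf("%d\n", table.Visit(kVisitFixedArray, &obj));  // 16
  return 0;
}
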
diff --git a/deps/v8/src/objects.cc b/deps/v8/src/objects.cc
index 126d1e0fd..792b6d984 100644
--- a/deps/v8/src/objects.cc
+++ b/deps/v8/src/objects.cc
@@ -27,7 +27,6 @@
#include "v8.h"
-#include "accessors.h"
#include "api.h"
#include "arguments.h"
#include "bootstrapper.h"
@@ -248,18 +247,6 @@ MaybeObject* JSProxy::GetPropertyWithHandler(Object* receiver_raw,
}
-Handle<Object> Object::GetProperty(Handle<Object> object, Handle<String> name) {
- // TODO(rossberg): The index test should not be here but in the GetProperty
- // method (or somewhere else entirely). Needs more global clean-up.
- uint32_t index;
- if (name->AsArrayIndex(&index)) return GetElement(object, index);
- Isolate* isolate = object->IsHeapObject()
- ? Handle<HeapObject>::cast(object)->GetIsolate()
- : Isolate::Current();
- CALL_HEAP_FUNCTION(isolate, object->GetProperty(*name), Object);
-}
-
-
Handle<Object> Object::GetElement(Handle<Object> object, uint32_t index) {
Isolate* isolate = object->IsHeapObject()
? Handle<HeapObject>::cast(object)->GetIsolate()
@@ -654,8 +641,7 @@ MaybeObject* Object::GetProperty(Object* receiver,
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case FIELD:
- value = result->holder()->FastPropertyAt(
- result->GetFieldIndex().field_index());
+ value = result->holder()->FastPropertyAt(result->GetFieldIndex());
ASSERT(!value->IsTheHole() || result->IsReadOnly());
return value->IsTheHole() ? heap->undefined_value() : value;
case CONSTANT_FUNCTION:
@@ -895,15 +881,14 @@ MaybeObject* String::SlowTryFlatten(PretenureFlag pretenure) {
int len = length();
Object* object;
String* result;
- if (IsOneByteRepresentation()) {
- { MaybeObject* maybe_object =
- heap->AllocateRawOneByteString(len, tenure);
+ if (IsAsciiRepresentation()) {
+ { MaybeObject* maybe_object = heap->AllocateRawAsciiString(len, tenure);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
result = String::cast(object);
String* first = cs->first();
int first_length = first->length();
- char* dest = SeqOneByteString::cast(result)->GetChars();
+ char* dest = SeqAsciiString::cast(result)->GetChars();
WriteToFlat(first, dest, 0, first_length);
String* second = cs->second();
WriteToFlat(second,
@@ -956,7 +941,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
if (size < ExternalString::kShortSize) {
return false;
}
- bool is_ascii = this->IsOneByteRepresentation();
+ bool is_ascii = this->IsAsciiRepresentation();
bool is_symbol = this->IsSymbol();
// Morph the object to an external string by adjusting the map and
@@ -1133,10 +1118,6 @@ void JSObject::JSObjectShortPrint(StringStream* accumulator) {
}
break;
}
- case JS_MODULE_TYPE: {
- accumulator->Add("<JS Module>");
- break;
- }
// All other JSObjects are rather similar to each other (JSObject,
// JSGlobalProxy, JSGlobalObject, JSUndetectableObject, JSValue).
default: {
@@ -1359,7 +1340,7 @@ void HeapObject::IterateBody(InstanceType type, int object_size,
SlicedString::BodyDescriptor::IterateBody(this, v);
break;
case kExternalStringTag:
- if ((type & kStringEncodingMask) == kOneByteStringTag) {
+ if ((type & kStringEncodingMask) == kAsciiStringTag) {
reinterpret_cast<ExternalAsciiString*>(this)->
ExternalAsciiStringIterateBody(v);
} else {
@@ -1696,88 +1677,39 @@ MaybeObject* JSObject::AddProperty(String* name,
ASSERT(!IsJSGlobalProxy());
Map* map_of_this = map();
Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
- MaybeObject* result;
if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
!map_of_this->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return value;
} else {
Handle<Object> args[1] = {Handle<String>(name)};
- return isolate->Throw(
+ return heap->isolate()->Throw(
*FACTORY->NewTypeError("object_not_extensible",
HandleVector(args, 1)));
}
}
-
if (HasFastProperties()) {
// Ensure the descriptor array does not get too big.
if (map_of_this->NumberOfOwnDescriptors() <
DescriptorArray::kMaxNumberOfDescriptors) {
if (value->IsJSFunction()) {
- result = AddConstantFunctionProperty(name,
- JSFunction::cast(value),
- attributes);
+ return AddConstantFunctionProperty(name,
+ JSFunction::cast(value),
+ attributes);
} else {
- result = AddFastProperty(name, value, attributes, store_mode);
+ return AddFastProperty(name, value, attributes, store_mode);
}
} else {
// Normalize the object to prevent very large instance descriptors.
// This eliminates unwanted N^2 allocation and lookup behavior.
Object* obj;
- MaybeObject* maybe = NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!maybe->To(&obj)) return maybe;
- result = AddSlowProperty(name, value, attributes);
+ { MaybeObject* maybe_obj =
+ NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
}
- } else {
- result = AddSlowProperty(name, value, attributes);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (FLAG_harmony_observation && map()->is_observed()) {
- EnqueueChangeRecord(handle(this, isolate),
- "new",
- handle(name, isolate),
- handle(heap->the_hole_value(), isolate));
- }
-
- return *hresult;
-}
-
-
-void JSObject::EnqueueChangeRecord(Handle<JSObject> object,
- const char* type_str,
- Handle<String> name,
- Handle<Object> old_value) {
- Isolate* isolate = object->GetIsolate();
- HandleScope scope;
- Handle<String> type = isolate->factory()->LookupAsciiSymbol(type_str);
- if (object->IsJSGlobalObject()) {
- object = handle(JSGlobalObject::cast(*object)->global_receiver(), isolate);
}
- Handle<Object> args[] = { type, object, name, old_value };
- bool threw;
- Execution::Call(Handle<JSFunction>(isolate->observers_notify_change()),
- Handle<Object>(isolate->heap()->undefined_value()),
- old_value->IsTheHole() ? 3 : 4, args,
- &threw);
- ASSERT(!threw);
-}
-
-
-void JSObject::DeliverChangeRecords(Isolate* isolate) {
- ASSERT(isolate->observer_delivery_pending());
- bool threw = false;
- Execution::Call(
- isolate->observers_deliver_changes(),
- isolate->factory()->undefined_value(),
- 0,
- NULL,
- &threw);
- ASSERT(!threw);
- isolate->set_observer_delivery_pending(false);
+ return AddSlowProperty(name, value, attributes);
}
@@ -1975,7 +1907,7 @@ MaybeObject* JSReceiver::SetProperty(String* name,
StrictModeFlag strict_mode,
JSReceiver::StoreFromKeyed store_mode) {
LookupResult result(GetIsolate());
- LocalLookup(name, &result, true);
+ LocalLookup(name, &result);
if (!result.IsFound()) {
map()->LookupTransition(JSObject::cast(this), name, &result);
}
@@ -2461,7 +2393,7 @@ void JSObject::LocalLookupRealNamedProperty(String* name,
// occur as fields.
if (result->IsField() &&
result->IsReadOnly() &&
- FastPropertyAt(result->GetFieldIndex().field_index())->IsTheHole()) {
+ FastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
result->DisallowCaching();
}
return;
@@ -2801,14 +2733,12 @@ MUST_USE_RESULT PropertyAttributes JSProxy::GetPropertyAttributeWithHandler(
MUST_USE_RESULT PropertyAttributes JSProxy::GetElementAttributeWithHandler(
- JSReceiver* receiver_raw,
+ JSReceiver* receiver,
uint32_t index) {
Isolate* isolate = GetIsolate();
HandleScope scope(isolate);
- Handle<JSProxy> proxy(this);
- Handle<JSReceiver> receiver(receiver_raw);
Handle<String> name = isolate->factory()->Uint32ToString(index);
- return proxy->GetPropertyAttributeWithHandler(*receiver, *name);
+ return GetPropertyAttributeWithHandler(receiver, *name);
}
@@ -2872,14 +2802,13 @@ void JSObject::AddFastPropertyUsingMap(Handle<JSObject> object,
}
-MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
+MaybeObject* JSObject::SetPropertyForResult(LookupResult* result,
String* name_raw,
Object* value_raw,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
StoreFromKeyed store_mode) {
Heap* heap = GetHeap();
- Isolate* isolate = heap->isolate();
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
@@ -2898,9 +2827,9 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
+ if (!heap->isolate()->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
return SetPropertyWithFailedAccessCheck(
- lookup, name_raw, value_raw, true, strict_mode);
+ result, name_raw, value_raw, true, strict_mode);
}
}
@@ -2909,78 +2838,66 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (proto->IsNull()) return value_raw;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetPropertyForResult(
- lookup, name_raw, value_raw, attributes, strict_mode, store_mode);
+ result, name_raw, value_raw, attributes, strict_mode, store_mode);
}
// From this point on everything needs to be handlified, because
// SetPropertyViaPrototypes might call back into JavaScript.
- HandleScope scope(isolate);
+ HandleScope scope(GetIsolate());
Handle<JSObject> self(this);
Handle<String> name(name_raw);
- Handle<Object> value(value_raw, isolate);
+ Handle<Object> value(value_raw);
- if (!lookup->IsProperty() && !self->IsJSContextExtensionObject()) {
+ if (!result->IsProperty() && !self->IsJSContextExtensionObject()) {
bool done = false;
MaybeObject* result_object = self->SetPropertyViaPrototypes(
*name, *value, attributes, strict_mode, &done);
if (done) return result_object;
}
- if (!lookup->IsFound()) {
+ if (!result->IsFound()) {
// Neither properties nor transitions found.
return self->AddProperty(
*name, *value, attributes, strict_mode, store_mode);
}
-
- if (lookup->IsProperty() && lookup->IsReadOnly()) {
+ if (result->IsProperty() && result->IsReadOnly()) {
if (strict_mode == kStrictMode) {
Handle<Object> args[] = { name, self };
- return isolate->Throw(*isolate->factory()->NewTypeError(
+ return heap->isolate()->Throw(*heap->isolate()->factory()->NewTypeError(
"strict_read_only_property", HandleVector(args, ARRAY_SIZE(args))));
} else {
return *value;
}
}
- Handle<Object> old_value(heap->the_hole_value(), isolate);
- if (FLAG_harmony_observation && map()->is_observed()) {
- old_value = handle(lookup->GetLazyValue(), isolate);
- }
-
// This is a real property that is not read-only, or it is a
// transition or null descriptor and there are no setters in the prototypes.
- MaybeObject* result = *value;
- switch (lookup->type()) {
+ switch (result->type()) {
case NORMAL:
- result = self->SetNormalizedProperty(lookup, *value);
- break;
+ return self->SetNormalizedProperty(result, *value);
case FIELD:
- result = self->FastPropertyAtPut(
- lookup->GetFieldIndex().field_index(), *value);
- break;
+ return self->FastPropertyAtPut(result->GetFieldIndex(), *value);
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (*value == lookup->GetConstantFunction()) return *value;
+ if (*value == result->GetConstantFunction()) return *value;
// Preserve the attributes of this existing property.
- attributes = lookup->GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- break;
+ attributes = result->GetAttributes();
+ return self->ConvertDescriptorToField(*name, *value, attributes);
case CALLBACKS: {
- Object* callback_object = lookup->GetCallbackObject();
+ Object* callback_object = result->GetCallbackObject();
return self->SetPropertyWithCallback(callback_object,
*name,
*value,
- lookup->holder(),
+ result->holder(),
strict_mode);
}
case INTERCEPTOR:
- result = self->SetPropertyWithInterceptor(*name,
- *value,
- attributes,
- strict_mode);
- break;
+ return self->SetPropertyWithInterceptor(*name,
+ *value,
+ attributes,
+ strict_mode);
case TRANSITION: {
- Map* transition_map = lookup->GetTransitionTarget();
+ Map* transition_map = result->GetTransitionTarget();
int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
@@ -2989,55 +2906,37 @@ MaybeObject* JSObject::SetPropertyForResult(LookupResult* lookup,
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(transition_map,
- *name,
- *value,
- field_index);
- } else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ return self->AddFastPropertyUsingMap(transition_map,
+ *name,
+ *value,
+ field_index);
}
+ return self->ConvertDescriptorToField(*name, *value, attributes);
} else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
-
- Object* constant_function = descriptors->GetValue(descriptor);
- if (constant_function == *value) {
- // If the same constant function is being added we can simply
- // transition to the target map.
- self->set_map(transition_map);
- result = constant_function;
- } else {
- // Otherwise, replace with a map transition to a new map with a FIELD,
- // even if the value is a constant function.
- result = self->ConvertTransitionToMapTransition(
- lookup->GetTransitionIndex(), *name, *value, attributes);
- }
+ return ConvertDescriptorToField(*name, *value, attributes);
}
- break;
- }
- case HANDLER:
- case NONEXISTENT:
- UNREACHABLE();
- }
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
+ ASSERT(details.type() == CONSTANT_FUNCTION);
- if (FLAG_harmony_observation && map()->is_observed()) {
- if (lookup->IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
- } else {
- LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
- ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
- if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ Object* constant_function = descriptors->GetValue(descriptor);
+ // If the same constant function is being added we can simply
+ // transition to the target map.
+ if (constant_function == *value) {
+ self->set_map(transition_map);
+ return constant_function;
}
+ // Otherwise, replace with a map transition to a new map with a FIELD,
+ // even if the value is a constant function.
+ return ConvertTransitionToMapTransition(
+ result->GetTransitionIndex(), *name, *value, attributes);
}
+ case HANDLER:
+ case NONEXISTENT:
+ UNREACHABLE();
+ return *value;
}
-
- return *hresult;
+ UNREACHABLE(); // keep the compiler happy
+ return *value;
}
@@ -3063,22 +2962,22 @@ Handle<Object> JSObject::SetLocalPropertyIgnoreAttributes(
MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
- String* name_raw,
- Object* value_raw,
+ String* name,
+ Object* value,
PropertyAttributes attributes) {
// Make sure that the top context does not change when doing callbacks or
// interceptor calls.
AssertNoContextChange ncc;
Isolate* isolate = GetIsolate();
- LookupResult lookup(isolate);
- LocalLookup(name_raw, &lookup, true);
- if (!lookup.IsFound()) map()->LookupTransition(this, name_raw, &lookup);
+ LookupResult result(isolate);
+ LocalLookup(name, &result);
+ if (!result.IsFound()) map()->LookupTransition(this, name, &result);
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
- return SetPropertyWithFailedAccessCheck(&lookup,
- name_raw,
- value_raw,
+ if (!isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
+ return SetPropertyWithFailedAccessCheck(&result,
+ name,
+ value,
false,
kNonStrictMode);
}
@@ -3086,69 +2985,40 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetLocalPropertyIgnoreAttributes(
- name_raw,
- value_raw,
+ name,
+ value,
attributes);
}
// Check for accessor in prototype chain removed here in clone.
- if (!lookup.IsFound()) {
+ if (!result.IsFound()) {
// Neither properties nor transitions found.
- return AddProperty(name_raw, value_raw, attributes, kNonStrictMode);
- }
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> value(value_raw, isolate);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value(), isolate);
- PropertyAttributes old_attributes = ABSENT;
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- if (is_observed) {
- // Function prototypes are stored specially
- if (self->IsJSFunction() &&
- JSFunction::cast(*self)->should_have_prototype() &&
- name->Equals(isolate->heap()->prototype_symbol())) {
- MaybeObject* maybe = Accessors::FunctionGetPrototype(*self, NULL);
- if (!maybe->ToHandle(&old_value, isolate)) return maybe;
- } else {
- old_value = handle(lookup.GetLazyValue(), isolate);
- }
- old_attributes = lookup.GetAttributes();
+ return AddProperty(name, value, attributes, kNonStrictMode);
}
// Check of IsReadOnly removed from here in clone.
- MaybeObject* result = *value;
- switch (lookup.type()) {
+ switch (result.type()) {
case NORMAL: {
PropertyDetails details = PropertyDetails(attributes, NORMAL);
- result = self->SetNormalizedProperty(*name, *value, details);
- break;
+ return SetNormalizedProperty(name, value, details);
}
case FIELD:
- result = self->FastPropertyAtPut(
- lookup.GetFieldIndex().field_index(), *value);
- break;
+ return FastPropertyAtPut(result.GetFieldIndex(), value);
case CONSTANT_FUNCTION:
// Only replace the function if necessary.
- if (*value != lookup.GetConstantFunction()) {
- // Preserve the attributes of this existing property.
- attributes = lookup.GetAttributes();
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- }
- break;
+ if (value == result.GetConstantFunction()) return value;
+ // Preserve the attributes of this existing property.
+ attributes = result.GetAttributes();
+ return ConvertDescriptorToField(name, value, attributes);
case CALLBACKS:
case INTERCEPTOR:
// Override callback in clone
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- break;
+ return ConvertDescriptorToField(name, value, attributes);
case TRANSITION: {
- Map* transition_map = lookup.GetTransitionTarget();
+ Map* transition_map = result.GetTransitionTarget();
int descriptor = transition_map->LastAdded();
DescriptorArray* descriptors = transition_map->instance_descriptors();
@@ -3157,48 +3027,29 @@ MaybeObject* JSObject::SetLocalPropertyIgnoreAttributes(
if (details.type() == FIELD) {
if (attributes == details.attributes()) {
int field_index = descriptors->GetFieldIndex(descriptor);
- result = self->AddFastPropertyUsingMap(
- transition_map, *name, *value, field_index);
- } else {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
+ return AddFastPropertyUsingMap(transition_map,
+ name,
+ value,
+ field_index);
}
+ return ConvertDescriptorToField(name, value, attributes);
} else if (details.type() == CALLBACKS) {
- result = self->ConvertDescriptorToField(*name, *value, attributes);
- } else {
- ASSERT(details.type() == CONSTANT_FUNCTION);
-
- // Replace transition to CONSTANT FUNCTION with a map transition to a
- // new map with a FIELD, even if the value is a function.
- result = self->ConvertTransitionToMapTransition(
- lookup.GetTransitionIndex(), *name, *value, attributes);
+ return ConvertDescriptorToField(name, value, attributes);
}
- break;
+
+ ASSERT(details.type() == CONSTANT_FUNCTION);
+
+ // Replace transition to CONSTANT FUNCTION with a map transition to a new
+ // map with a FIELD, even if the value is a function.
+ return ConvertTransitionToMapTransition(
+ result.GetTransitionIndex(), name, value, attributes);
}
case HANDLER:
case NONEXISTENT:
UNREACHABLE();
}
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed) {
- if (lookup.IsTransition()) {
- EnqueueChangeRecord(self, "new", name, old_value);
- } else {
- LookupResult new_lookup(isolate);
- self->LocalLookup(*name, &new_lookup, true);
- ASSERT(!new_lookup.GetLazyValue()->IsTheHole());
- if (old_value->IsTheHole() ||
- new_lookup.GetAttributes() != old_attributes) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else if (!new_lookup.GetLazyValue()->SameValue(*old_value)) {
- EnqueueChangeRecord(self, "updated", name, old_value);
- }
- }
- }
-
- return *hresult;
+ UNREACHABLE(); // keep the compiler happy
+ return value;
}
@@ -3279,43 +3130,42 @@ PropertyAttributes JSReceiver::GetPropertyAttributeWithReceiver(
String* key) {
uint32_t index = 0;
if (IsJSObject() && key->AsArrayIndex(&index)) {
- return JSObject::cast(this)->GetElementAttributeWithReceiver(
- receiver, index, true);
+ return JSObject::cast(this)->HasElementWithReceiver(receiver, index)
+ ? NONE : ABSENT;
}
// Named property.
- LookupResult lookup(GetIsolate());
- Lookup(key, &lookup);
- return GetPropertyAttributeForResult(receiver, &lookup, key, true);
+ LookupResult result(GetIsolate());
+ Lookup(key, &result);
+ return GetPropertyAttribute(receiver, &result, key, true);
}
-PropertyAttributes JSReceiver::GetPropertyAttributeForResult(
- JSReceiver* receiver,
- LookupResult* lookup,
- String* name,
- bool continue_search) {
+PropertyAttributes JSReceiver::GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search) {
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
JSObject* this_obj = JSObject::cast(this);
Heap* heap = GetHeap();
if (!heap->isolate()->MayNamedAccess(this_obj, name, v8::ACCESS_HAS)) {
return this_obj->GetPropertyAttributeWithFailedAccessCheck(
- receiver, lookup, name, continue_search);
+ receiver, result, name, continue_search);
}
}
- if (lookup->IsFound()) {
- switch (lookup->type()) {
+ if (result->IsFound()) {
+ switch (result->type()) {
case NORMAL: // fall through
case FIELD:
case CONSTANT_FUNCTION:
case CALLBACKS:
- return lookup->GetAttributes();
+ return result->GetAttributes();
case HANDLER: {
- return JSProxy::cast(lookup->proxy())->GetPropertyAttributeWithHandler(
+ return JSProxy::cast(result->proxy())->GetPropertyAttributeWithHandler(
receiver, name);
}
case INTERCEPTOR:
- return lookup->holder()->GetPropertyAttributeWithInterceptor(
+ return result->holder()->GetPropertyAttributeWithInterceptor(
JSObject::cast(receiver), name, continue_search);
case TRANSITION:
case NONEXISTENT:
@@ -3330,118 +3180,13 @@ PropertyAttributes JSReceiver::GetLocalPropertyAttribute(String* name) {
// Check whether the name is an array index.
uint32_t index = 0;
if (IsJSObject() && name->AsArrayIndex(&index)) {
- return GetLocalElementAttribute(index);
+ if (JSObject::cast(this)->HasLocalElement(index)) return NONE;
+ return ABSENT;
}
// Named property.
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return GetPropertyAttributeForResult(this, &lookup, name, false);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithReceiver(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
-
- // Check access rights if needed.
- if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
- return ABSENT;
- }
- }
-
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return ABSENT;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetElementAttributeWithReceiver(
- receiver, index, continue_search);
- }
-
- // Check for lookup interceptor except when bootstrapping.
- if (HasIndexedInterceptor() && !isolate->bootstrapper()->IsActive()) {
- return GetElementAttributeWithInterceptor(receiver, index, continue_search);
- }
-
- // Handle [] on String objects.
- if (this->IsStringObjectWithCharacterAt(index)) {
- return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- }
-
- return GetElementAttributeWithoutInterceptor(
- receiver, index, continue_search);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
- // Make sure that the top context does not change when doing
- // callbacks or interceptor calls.
- AssertNoContextChange ncc;
- HandleScope scope(isolate);
- Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- CustomArguments args(isolate, interceptor->data(), receiver, this);
- v8::AccessorInfo info(args.end());
- if (!interceptor->query()->IsUndefined()) {
- v8::IndexedPropertyQuery query =
- v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
- v8::Handle<v8::Integer> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = query(index, info);
- }
- if (!result.IsEmpty())
- return static_cast<PropertyAttributes>(result->Int32Value());
- } else if (!interceptor->getter()->IsUndefined()) {
- v8::IndexedPropertyGetter getter =
- v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
- LOG(isolate,
- ApiIndexedPropertyAccess("interceptor-indexed-get-has", this, index));
- v8::Handle<v8::Value> result;
- {
- // Leaving JavaScript.
- VMState state(isolate, EXTERNAL);
- result = getter(index, info);
- }
- if (!result.IsEmpty()) return NONE;
- }
-
- return holder->GetElementAttributeWithoutInterceptor(
- *hreceiver, index, continue_search);
-}
-
-
-PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver, uint32_t index, bool continue_search) {
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSReceiver> hreceiver(receiver);
- Handle<JSObject> holder(this);
- PropertyAttributes attr = holder->GetElementsAccessor()->GetAttributes(
- *hreceiver, *holder, index);
- if (attr != ABSENT) return attr;
-
- if (holder->IsStringObjectWithCharacterAt(index)) {
- return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
- }
-
- if (!continue_search) return ABSENT;
-
- Object* pt = holder->GetPrototype();
- if (pt->IsJSProxy()) {
- // We need to follow the spec and simulate a call to [[GetOwnProperty]].
- return JSProxy::cast(pt)->GetElementAttributeWithHandler(*hreceiver, index);
- }
- if (pt->IsNull()) return ABSENT;
- return JSObject::cast(pt)->GetElementAttributeWithReceiver(
- *hreceiver, index, true);
+ LookupResult result(GetIsolate());
+ LocalLookup(name, &result);
+ return GetPropertyAttribute(this, &result, name, false);
}
@@ -4166,38 +3911,15 @@ MaybeObject* JSObject::DeleteElement(uint32_t index, DeleteMode mode) {
return JSGlobalObject::cast(proto)->DeleteElement(index, mode);
}
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
-
- Handle<Object> old_value;
- bool should_enqueue_change_record = false;
- if (FLAG_harmony_observation && self->map()->is_observed()) {
- should_enqueue_change_record = self->HasLocalElement(index);
- if (should_enqueue_change_record) {
- old_value = self->GetLocalElementAccessorPair(index) != NULL
- ? Handle<Object>::cast(isolate->factory()->the_hole_value())
- : Object::GetElement(self, index);
+ if (HasIndexedInterceptor()) {
+ // Skip interceptor if forcing deletion.
+ if (mode != FORCE_DELETION) {
+ return DeleteElementWithInterceptor(index);
}
+ mode = JSReceiver::FORCE_DELETION;
}
- MaybeObject* result;
- // Skip interceptor if forcing deletion.
- if (self->HasIndexedInterceptor() && mode != FORCE_DELETION) {
- result = self->DeleteElementWithInterceptor(index);
- } else {
- result = self->GetElementsAccessor()->Delete(*self, index, mode);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (should_enqueue_change_record && !self->HasLocalElement(index)) {
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- EnqueueChangeRecord(self, "deleted", name, old_value);
- }
-
- return *hresult;
+ return GetElementsAccessor()->Delete(this, index, mode);
}
@@ -4231,60 +3953,38 @@ MaybeObject* JSObject::DeleteProperty(String* name, DeleteMode mode) {
uint32_t index = 0;
if (name->AsArrayIndex(&index)) {
return DeleteElement(index, mode);
- }
-
- LookupResult lookup(isolate);
- LocalLookup(name, &lookup, true);
- if (!lookup.IsFound()) return isolate->heap()->true_value();
- // Ignore attributes if forcing a deletion.
- if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
- if (mode == STRICT_DELETION) {
- // Deleting a non-configurable property in strict mode.
- HandleScope scope(isolate);
- Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
- return isolate->Throw(*isolate->factory()->NewTypeError(
- "strict_delete_property", HandleVector(args, 2)));
- }
- return isolate->heap()->false_value();
- }
-
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> hname(name);
-
- Handle<Object> old_value(isolate->heap()->the_hole_value());
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- if (is_observed) {
- old_value = handle(lookup.GetLazyValue(), isolate);
- }
- MaybeObject* result;
-
- // Check for interceptor.
- if (lookup.IsInterceptor()) {
- // Skip interceptor if forcing a deletion.
- if (mode == FORCE_DELETION) {
- result = self->DeletePropertyPostInterceptor(*hname, mode);
- } else {
- result = self->DeletePropertyWithInterceptor(*hname);
- }
} else {
+ LookupResult result(isolate);
+ LocalLookup(name, &result);
+ if (!result.IsFound()) return isolate->heap()->true_value();
+ // Ignore attributes if forcing a deletion.
+ if (result.IsDontDelete() && mode != FORCE_DELETION) {
+ if (mode == STRICT_DELETION) {
+ // Deleting a non-configurable property in strict mode.
+ HandleScope scope(isolate);
+ Handle<Object> args[2] = { Handle<Object>(name), Handle<Object>(this) };
+ return isolate->Throw(*isolate->factory()->NewTypeError(
+ "strict_delete_property", HandleVector(args, 2)));
+ }
+ return isolate->heap()->false_value();
+ }
+ // Check for interceptor.
+ if (result.IsInterceptor()) {
+ // Skip interceptor if forcing a deletion.
+ if (mode == FORCE_DELETION) {
+ return DeletePropertyPostInterceptor(name, mode);
+ }
+ return DeletePropertyWithInterceptor(name);
+ }
// Normalize object if needed.
Object* obj;
- result = self->NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
- if (!result->To(&obj)) return result;
+ { MaybeObject* maybe_obj =
+ NormalizeProperties(CLEAR_INOBJECT_PROPERTIES, 0);
+ if (!maybe_obj->ToObject(&obj)) return maybe_obj;
+ }
// Make sure the properties are normalized before removing the entry.
- result = self->DeleteNormalizedProperty(*hname, mode);
- }
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed && !self->HasLocalProperty(*hname)) {
- EnqueueChangeRecord(self, "deleted", hname, old_value);
+ return DeleteNormalizedProperty(name, mode);
}
-
- return *hresult;
}
@@ -4564,8 +4264,7 @@ AccessorDescriptor* Map::FindAccessor(String* name) {
}
-void JSReceiver::LocalLookup(
- String* name, LookupResult* result, bool search_hidden_prototypes) {
+void JSReceiver::LocalLookup(String* name, LookupResult* result) {
ASSERT(name->IsString());
Heap* heap = GetHeap();
@@ -4574,8 +4273,7 @@ void JSReceiver::LocalLookup(
Object* proto = GetPrototype();
if (proto->IsNull()) return result->NotFound();
ASSERT(proto->IsJSGlobalObject());
- return JSReceiver::cast(proto)->LocalLookup(
- name, result, search_hidden_prototypes);
+ return JSReceiver::cast(proto)->LocalLookup(name, result);
}
if (IsJSProxy()) {
@@ -4605,14 +4303,6 @@ void JSReceiver::LocalLookup(
}
js_object->LocalLookupRealNamedProperty(name, result);
- if (result->IsFound() || !search_hidden_prototypes) return;
-
- Object* proto = js_object->GetPrototype();
- if (!proto->IsJSReceiver()) return;
- JSReceiver* receiver = JSReceiver::cast(proto);
- if (receiver->map()->is_hidden_prototype()) {
- receiver->LocalLookup(name, result, search_hidden_prototypes);
- }
}
@@ -4622,7 +4312,7 @@ void JSReceiver::Lookup(String* name, LookupResult* result) {
for (Object* current = this;
current != heap->null_value();
current = JSObject::cast(current)->GetPrototype()) {
- JSReceiver::cast(current)->LocalLookup(name, result, false);
+ JSReceiver::cast(current)->LocalLookup(name, result);
if (result->IsFound()) return;
}
result->NotFound();
@@ -4763,9 +4453,7 @@ MaybeObject* JSObject::DefinePropertyAccessor(String* name,
// to do a lookup, which seems to be a bit of overkill.
Heap* heap = GetHeap();
bool only_attribute_changes = getter->IsNull() && setter->IsNull();
- if (HasFastProperties() && !only_attribute_changes &&
- (map()->NumberOfOwnDescriptors() <
- DescriptorArray::kMaxNumberOfDescriptors)) {
+ if (HasFastProperties() && !only_attribute_changes) {
MaybeObject* getterOk = heap->undefined_value();
if (!getter->IsNull()) {
getterOk = DefineFastAccessor(name, ACCESSOR_GETTER, getter, attributes);
@@ -4895,14 +4583,14 @@ void JSObject::DefineAccessor(Handle<JSObject> object,
object->DefineAccessor(*name, *getter, *setter, attributes));
}
-MaybeObject* JSObject::DefineAccessor(String* name_raw,
- Object* getter_raw,
- Object* setter_raw,
+MaybeObject* JSObject::DefineAccessor(String* name,
+ Object* getter,
+ Object* setter,
PropertyAttributes attributes) {
Isolate* isolate = GetIsolate();
// Check access rights if needed.
if (IsAccessCheckNeeded() &&
- !isolate->MayNamedAccess(this, name_raw, v8::ACCESS_SET)) {
+ !isolate->MayNamedAccess(this, name, v8::ACCESS_SET)) {
isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
return isolate->heap()->undefined_value();
}
@@ -4912,7 +4600,7 @@ MaybeObject* JSObject::DefineAccessor(String* name_raw,
if (proto->IsNull()) return this;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->DefineAccessor(
- name_raw, getter_raw, setter_raw, attributes);
+ name, getter, setter, attributes);
}
// Make sure that the top context does not change when doing callbacks or
@@ -4920,50 +4608,14 @@ MaybeObject* JSObject::DefineAccessor(String* name_raw,
AssertNoContextChange ncc;
// Try to flatten before operating on the string.
- name_raw->TryFlatten();
-
- if (!CanSetCallback(name_raw)) return isolate->heap()->undefined_value();
+ name->TryFlatten();
- // From this point on everything needs to be handlified.
- HandleScope scope(isolate);
- Handle<JSObject> self(this);
- Handle<String> name(name_raw);
- Handle<Object> getter(getter_raw);
- Handle<Object> setter(setter_raw);
+ if (!CanSetCallback(name)) return isolate->heap()->undefined_value();
uint32_t index = 0;
- bool is_element = name->AsArrayIndex(&index);
-
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- bool is_observed = FLAG_harmony_observation && self->map()->is_observed();
- bool preexists = false;
- if (is_observed) {
- if (is_element) {
- preexists = HasLocalElement(index);
- if (preexists && self->GetLocalElementAccessorPair(index) == NULL) {
- old_value = Object::GetElement(self, index);
- }
- } else {
- LookupResult lookup(isolate);
- LocalLookup(*name, &lookup, true);
- preexists = lookup.IsProperty();
- if (preexists) old_value = handle(lookup.GetLazyValue(), isolate);
- }
- }
-
- MaybeObject* result = is_element ?
- self->DefineElementAccessor(index, *getter, *setter, attributes) :
- self->DefinePropertyAccessor(*name, *getter, *setter, attributes);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- if (is_observed) {
- const char* type = preexists ? "reconfigured" : "new";
- EnqueueChangeRecord(self, type, name, old_value);
- }
-
- return *hresult;
+ return name->AsArrayIndex(&index) ?
+ DefineElementAccessor(index, getter, setter, attributes) :
+ DefinePropertyAccessor(name, getter, setter, attributes);
}
@@ -5043,8 +4695,7 @@ MaybeObject* JSObject::DefineFastAccessor(String* name,
if (result.IsFound()) {
Map* target = result.GetTransitionTarget();
int descriptor_number = target->LastAdded();
- ASSERT(target->instance_descriptors()->GetKey(descriptor_number)
- ->Equals(name));
+ ASSERT(target->instance_descriptors()->GetKey(descriptor_number) == name);
return TryAccessorTransition(
this, target, descriptor_number, component, accessor, attributes);
}
@@ -5143,7 +4794,7 @@ MaybeObject* JSObject::DefineAccessor(AccessorInfo* info) {
} else {
// Lookup the name.
LookupResult result(isolate);
- LocalLookup(name, &result, true);
+ LocalLookup(name, &result);
// ES5 forbids turning a property into an accessor if it's not
// configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
@@ -5245,6 +4896,7 @@ MaybeObject* Map::RawCopy(int instance_size) {
result->set_constructor(constructor());
result->set_bit_field(bit_field());
result->set_bit_field2(bit_field2());
+ result->set_bit_field3(bit_field3());
int new_bit_field3 = bit_field3();
new_bit_field3 = OwnsDescriptors::update(new_bit_field3, true);
new_bit_field3 = NumberOfOwnDescriptorsBits::update(new_bit_field3, 0);
@@ -6557,10 +6209,10 @@ String::FlatContent String::GetFlatContent() {
ASSERT(shape.representation_tag() != kConsStringTag &&
shape.representation_tag() != kSlicedStringTag);
}
- if (shape.encoding_tag() == kOneByteStringTag) {
+ if (shape.encoding_tag() == kAsciiStringTag) {
const char* start;
if (shape.representation_tag() == kSeqStringTag) {
- start = SeqOneByteString::cast(string)->GetChars();
+ start = SeqAsciiString::cast(string)->GetChars();
} else {
start = ExternalAsciiString::cast(string)->GetChars();
}
@@ -6643,7 +6295,7 @@ const uc16* String::GetTwoByteData() {
const uc16* String::GetTwoByteData(unsigned start) {
- ASSERT(!IsOneByteRepresentationUnderneath());
+ ASSERT(!IsAsciiRepresentationUnderneath());
switch (StringShape(this).representation_tag()) {
case kSeqStringTag:
return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
@@ -6724,7 +6376,7 @@ void SeqTwoByteString::SeqTwoByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
}
-const unibrow::byte* SeqOneByteString::SeqOneByteStringReadBlock(
+const unibrow::byte* SeqAsciiString::SeqAsciiStringReadBlock(
unsigned* remaining,
unsigned* offset_ptr,
unsigned max_chars) {
@@ -6852,7 +6504,7 @@ void ExternalTwoByteString::ExternalTwoByteStringReadBlockIntoBuffer(
}
-void SeqOneByteString::SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
+void SeqAsciiString::SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* rbb,
unsigned* offset_ptr,
unsigned max_chars) {
unsigned capacity = rbb->capacity - rbb->cursor;
@@ -6895,9 +6547,9 @@ const unibrow::byte* String::ReadBlock(String* input,
}
switch (StringShape(input).representation_tag()) {
case kSeqStringTag:
- if (input->IsOneByteRepresentation()) {
- SeqOneByteString* str = SeqOneByteString::cast(input);
- return str->SeqOneByteStringReadBlock(&rbb->remaining,
+ if (input->IsAsciiRepresentation()) {
+ SeqAsciiString* str = SeqAsciiString::cast(input);
+ return str->SeqAsciiStringReadBlock(&rbb->remaining,
offset_ptr,
max_chars);
} else {
@@ -6912,7 +6564,7 @@ const unibrow::byte* String::ReadBlock(String* input,
offset_ptr,
max_chars);
case kExternalStringTag:
- if (input->IsOneByteRepresentation()) {
+ if (input->IsAsciiRepresentation()) {
return ExternalAsciiString::cast(input)->ExternalAsciiStringReadBlock(
&rbb->remaining,
offset_ptr,
@@ -7026,128 +6678,8 @@ void StringInputBuffer::Seek(unsigned pos) {
}
-String* ConsStringIteratorOp::Operate(ConsString* consString,
- unsigned* outerOffset, int32_t* typeOut, unsigned* lengthOut) {
- ASSERT(*lengthOut == (unsigned)consString->length());
- // Push the root string.
- PushLeft(consString);
- root_ = consString;
- root_type_ = *typeOut;
- root_length_ = *lengthOut;
- unsigned targetOffset = *outerOffset;
- unsigned offset = 0;
- while (true) {
-    // Loop until we find the string that contains the target offset.
- String* string = consString->first();
- unsigned length = string->length();
- int32_t type;
- if (targetOffset < offset + length) {
- // Target offset is in the left branch.
- // Mark the descent.
- ClearRightDescent();
-      // Keep going if we're still in a ConsString.
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- consString = ConsString::cast(string);
- PushLeft(consString);
- continue;
- }
- } else {
- // Descend right.
- // Update progress through the string.
- offset += length;
-      // Keep going if we're still in a ConsString.
- string = consString->second();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) == kConsStringTag) {
- consString = ConsString::cast(string);
- PushRight(consString, type);
- continue;
- }
- // Mark the descent.
- SetRightDescent();
- // Need this to be updated for the current string.
- length = string->length();
- // Account for the possibility of an empty right leaf.
- while (length == 0) {
- bool blewStack;
- // Need to adjust maximum depth for NextLeaf to work.
- AdjustMaximumDepth();
- string = NextLeaf(&blewStack, &type);
- if (string == NULL) {
- // Luckily, this case is impossible.
- ASSERT(!blewStack);
- return NULL;
- }
- length = string->length();
- }
- }
-    // Tell the stack we're done descending.
- AdjustMaximumDepth();
- ASSERT(length != 0);
- // Adjust return values and exit.
- unsigned innerOffset = targetOffset - offset;
- consumed_ += length - innerOffset;
- *outerOffset = innerOffset;
- *typeOut = type;
- *lengthOut = length;
- return string;
- }
- UNREACHABLE();
- return NULL;
-}
-
-
-String* ConsStringIteratorOp::NextLeaf(bool* blewStack, int32_t* typeOut) {
- while (true) {
- // Tree traversal complete.
- if (depth_ == 0) {
- *blewStack = false;
- return NULL;
- }
- // We've lost track of higher nodes.
- if (maximum_depth_ - depth_ == kStackSize) {
- *blewStack = true;
- return NULL;
- }
- // Check if we're done with this level.
- bool haveAlreadyReadRight = trace_ & MaskForDepth(depth_ - 1);
- if (haveAlreadyReadRight) {
- Pop();
- continue;
- }
- // Go right.
- ConsString* consString = frames_[OffsetForDepth(depth_ - 1)];
- String* string = consString->second();
- int32_t type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- // Don't need to mark the descent here.
- // Pop stack so next iteration is in correct place.
- Pop();
- *typeOut = type;
- return string;
- }
- // No need to mark the descent.
- consString = ConsString::cast(string);
- PushRight(consString, type);
- // Need to traverse all the way left.
- while (true) {
- // Continue left.
- // Update marker.
- ClearRightDescent();
- string = consString->first();
- type = string->map()->instance_type();
- if ((type & kStringRepresentationMask) != kConsStringTag) {
- AdjustMaximumDepth();
- *typeOut = type;
- return string;
- }
- consString = ConsString::cast(string);
- PushLeft(consString);
- }
- }
- UNREACHABLE();
- return NULL;
+void SafeStringInputBuffer::Seek(unsigned pos) {
+ Reset(pos, input_);
}
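
The removed ConsStringIteratorOp walked a cons-string tree iteratively with an explicit stack rather than recursing. A toy model of that traversal shape (deliberately simplified: no offsets, depth limits, or empty-leaf handling from the original):

#include <iostream>
#include <stack>
#include <string>

// Toy cons-string: either a leaf or a concatenation of two nodes.
struct Node {
  std::string leaf;  // non-empty only for leaves
  Node* first;       // left child for cons nodes, else nullptr
  Node* second;      // right child for cons nodes, else nullptr
};

// Iterative left-to-right leaf traversal with an explicit stack.
void Flatten(Node* root, std::string* out) {
  std::stack<Node*> pending;
  pending.push(root);
  while (!pending.empty()) {
    Node* n = pending.top();
    pending.pop();
    if (n->first == nullptr) {   // leaf: emit its characters
      *out += n->leaf;
    } else {                     // cons: visit first, then second
      pending.push(n->second);   // pushed first so it pops second
      pending.push(n->first);
    }
  }
}

int main() {
  Node l1{"foo", nullptr, nullptr};
  Node l2{"bar", nullptr, nullptr};
  Node l3{"baz", nullptr, nullptr};
  Node right{"", &l2, &l3};
  Node root{"", &l1, &right};
  std::string flat;
  Flatten(&root, &flat);
  std::cout << flat << "\n";  // foobarbaz
  return 0;
}
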
@@ -7164,8 +6696,8 @@ void String::ReadBlockIntoBuffer(String* input,
switch (StringShape(input).representation_tag()) {
case kSeqStringTag:
- if (input->IsOneByteRepresentation()) {
- SeqOneByteString::cast(input)->SeqOneByteStringReadBlockIntoBuffer(rbb,
+ if (input->IsAsciiRepresentation()) {
+ SeqAsciiString::cast(input)->SeqAsciiStringReadBlockIntoBuffer(rbb,
offset_ptr,
max_chars);
return;
@@ -7181,7 +6713,7 @@ void String::ReadBlockIntoBuffer(String* input,
max_chars);
return;
case kExternalStringTag:
- if (input->IsOneByteRepresentation()) {
+ if (input->IsAsciiRepresentation()) {
ExternalAsciiString::cast(input)->
ExternalAsciiStringReadBlockIntoBuffer(rbb, offset_ptr, max_chars);
} else {
@@ -7366,7 +6898,7 @@ void String::WriteToFlat(String* src,
while (true) {
ASSERT(0 <= from && from <= to && to <= source->length());
switch (StringShape(source).full_representation_tag()) {
- case kOneByteStringTag | kExternalStringTag: {
+ case kAsciiStringTag | kExternalStringTag: {
CopyChars(sink,
ExternalAsciiString::cast(source)->GetChars() + from,
to - from);
@@ -7380,9 +6912,9 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kOneByteStringTag | kSeqStringTag: {
+ case kAsciiStringTag | kSeqStringTag: {
CopyChars(sink,
- SeqOneByteString::cast(source)->GetChars() + from,
+ SeqAsciiString::cast(source)->GetChars() + from,
to - from);
return;
}
@@ -7392,7 +6924,7 @@ void String::WriteToFlat(String* src,
to - from);
return;
}
- case kOneByteStringTag | kConsStringTag:
+ case kAsciiStringTag | kConsStringTag:
case kTwoByteStringTag | kConsStringTag: {
ConsString* cons_string = ConsString::cast(source);
String* first = cons_string->first();
@@ -7417,9 +6949,9 @@ void String::WriteToFlat(String* src,
// common case of sequential ascii right child.
if (to - boundary == 1) {
sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
- } else if (second->IsSeqOneByteString()) {
+ } else if (second->IsSeqAsciiString()) {
CopyChars(sink + boundary - from,
- SeqOneByteString::cast(second)->GetChars(),
+ SeqAsciiString::cast(second)->GetChars(),
to - boundary);
} else {
WriteToFlat(second,
@@ -7433,7 +6965,7 @@ void String::WriteToFlat(String* src,
}
break;
}
- case kOneByteStringTag | kSlicedStringTag:
+ case kAsciiStringTag | kSlicedStringTag:
case kTwoByteStringTag | kSlicedStringTag: {
SlicedString* slice = SlicedString::cast(source);
unsigned offset = slice->offset();
@@ -7558,8 +7090,8 @@ bool String::SlowEquals(String* other) {
if (StringShape(lhs).IsSequentialAscii() &&
StringShape(rhs).IsSequentialAscii()) {
- const char* str1 = SeqOneByteString::cast(lhs)->GetChars();
- const char* str2 = SeqOneByteString::cast(rhs)->GetChars();
+ const char* str1 = SeqAsciiString::cast(lhs)->GetChars();
+ const char* str2 = SeqAsciiString::cast(rhs)->GetChars();
return CompareRawStringContents(Vector<const char>(str1, len),
Vector<const char>(str2, len));
}
@@ -7687,7 +7219,7 @@ uint32_t String::ComputeAndSetHash() {
// Compute the hash code.
uint32_t field = 0;
if (StringShape(this).IsSequentialAscii()) {
- field = HashSequentialString(SeqOneByteString::cast(this)->GetChars(),
+ field = HashSequentialString(SeqAsciiString::cast(this)->GetChars(),
len,
GetHeap()->HashSeed());
} else if (StringShape(this).IsSequentialTwoByte()) {
@@ -7755,36 +7287,6 @@ bool String::SlowAsArrayIndex(uint32_t* index) {
}
-String* SeqString::Truncate(int new_length) {
- Heap* heap = GetHeap();
- if (new_length <= 0) return heap->empty_string();
-
- int string_size, allocated_string_size;
- int old_length = length();
- if (old_length <= new_length) return this;
-
- if (IsSeqOneByteString()) {
- allocated_string_size = SeqOneByteString::SizeFor(old_length);
- string_size = SeqOneByteString::SizeFor(new_length);
- } else {
- allocated_string_size = SeqTwoByteString::SizeFor(old_length);
- string_size = SeqTwoByteString::SizeFor(new_length);
- }
-
- int delta = allocated_string_size - string_size;
- set_length(new_length);
-
- // String sizes are pointer size aligned, so that we can use filler objects
- // that are a multiple of pointer size.
- Address end_of_string = address() + string_size;
- heap->CreateFillerObjectAt(end_of_string, delta);
- if (Marking::IsBlack(Marking::MarkBitFrom(this))) {
- MemoryChunk::IncrementLiveBytesFromMutator(address(), -delta);
- }
- return this;
-}
-
-
uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
// For array indexes mix the length into the hash as an array index could
// be zero.
@@ -7941,6 +7443,7 @@ void Map::ClearNonLiveTransitions(Heap* heap) {
if (ClearBackPointer(heap, target)) {
if (target->instance_descriptors() == descriptors) {
descriptors_owner_died = true;
}
} else {
if (i != transition_index) {
@@ -8016,7 +7519,6 @@ bool Map::EquivalentToForNormalization(Map* other,
instance_type() == other->instance_type() &&
bit_field() == other->bit_field() &&
bit_field2() == other->bit_field2() &&
- is_observed() == other->is_observed() &&
function_with_prototype() == other->function_with_prototype();
}
@@ -8073,6 +7575,11 @@ bool SharedFunctionInfo::CompileLazy(Handle<SharedFunctionInfo> shared,
}
+void SharedFunctionInfo::ClearOptimizedCodeMap() {
+ set_optimized_code_map(Smi::FromInt(0));
+}
+
+
void SharedFunctionInfo::AddToOptimizedCodeMap(
Handle<SharedFunctionInfo> shared,
Handle<Context> native_context,
@@ -8192,35 +7699,6 @@ MaybeObject* JSObject::OptimizeAsPrototype() {
}
-MUST_USE_RESULT static MaybeObject* CacheInitialJSArrayMaps(
- Context* native_context, Map* initial_map) {
- // Replace all of the cached initial array maps in the native context with
- // the appropriate transitioned elements kind maps.
- Heap* heap = native_context->GetHeap();
- MaybeObject* maybe_maps =
- heap->AllocateFixedArrayWithHoles(kElementsKindCount);
- FixedArray* maps;
- if (!maybe_maps->To(&maps)) return maybe_maps;
-
- Map* current_map = initial_map;
- ElementsKind kind = current_map->elements_kind();
- ASSERT(kind == GetInitialFastElementsKind());
- maps->set(kind, current_map);
- for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
- i < kFastElementsKindCount; ++i) {
- Map* new_map;
- ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
- MaybeObject* maybe_new_map =
- current_map->CopyAsElementsKind(next_kind, INSERT_TRANSITION);
- if (!maybe_new_map->To(&new_map)) return maybe_new_map;
- maps->set(next_kind, new_map);
- current_map = new_map;
- }
- native_context->set_js_array_maps(maps);
- return initial_map;
-}
-
-
MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
ASSERT(value->IsJSReceiver());
Heap* heap = GetHeap();
@@ -8235,29 +7713,14 @@ MaybeObject* JSFunction::SetInstancePrototype(Object* value) {
// Now some logic for the maps of the objects that are created by using this
// function as a constructor.
if (has_initial_map()) {
- // If the function has allocated the initial map replace it with a
- // copy containing the new prototype. Also complete any in-object
- // slack tracking that is in progress at this point because it is
- // still tracking the old copy.
- if (shared()->IsInobjectSlackTrackingInProgress()) {
- shared()->CompleteInobjectSlackTracking();
- }
+ // If the function has allocated the initial map
+ // replace it with a copy containing the new prototype.
Map* new_map;
- MaybeObject* maybe_object = initial_map()->Copy();
- if (!maybe_object->To(&new_map)) return maybe_object;
+ MaybeObject* maybe_new_map = initial_map()->Copy();
+ if (!maybe_new_map->To(&new_map)) return maybe_new_map;
new_map->set_prototype(value);
-
- // If the function is used as the global Array function, cache the
- // initial map (and transitioned versions) in the native context.
- Context* native_context = context()->native_context();
- Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
- if (array_function->IsJSFunction() &&
- this == JSFunction::cast(array_function)) {
- MaybeObject* ok = CacheInitialJSArrayMaps(native_context, new_map);
- if (ok->IsFailure()) return ok;
- }
-
- set_initial_map(new_map);
+ MaybeObject* maybe_object = set_initial_map_and_cache_transitions(new_map);
+ if (maybe_object->IsFailure()) return maybe_object;
} else {
// Put the value in the initial map field until an initial map is
// needed. At that point, a new initial map is created and the
@@ -8562,7 +8025,7 @@ void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
// old code, we have to replace it. We should try to avoid this
// altogether because it flushes valuable type feedback by
// effectively resetting all IC state.
- ReplaceCode(recompiled);
+ set_code(recompiled);
}
ASSERT(has_deoptimization_support());
}
@@ -8642,7 +8105,7 @@ void SharedFunctionInfo::DetachInitialMap() {
// constructor is called. The countdown will continue and (possibly after
// several more GCs) CompleteInobjectSlackTracking will eventually be called.
Heap* heap = map->GetHeap();
- set_initial_map(heap->undefined_value());
+ set_initial_map(heap->raw_unchecked_undefined_value());
Builtins* builtins = heap->isolate()->builtins();
ASSERT_EQ(builtins->builtin(Builtins::kJSConstructStubCountdown),
*RawField(this, kConstructStubOffset));
@@ -8770,15 +8233,6 @@ void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
}
-void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
- ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
- Object* stub = rinfo->code_age_stub();
- if (stub) {
- VisitPointer(&stub);
- }
-}
-
-
void ObjectVisitor::VisitCodeEntry(Address entry_address) {
Object* code = Code::GetObjectFromEntryAddress(entry_address);
Object* old_code = code;
@@ -8973,7 +8427,6 @@ void Code::ClearInlineCaches() {
void Code::ClearTypeFeedbackCells(Heap* heap) {
- if (kind() != FUNCTION) return;
Object* raw_info = type_feedback_info();
if (raw_info->IsTypeFeedbackInfo()) {
TypeFeedbackCells* type_feedback_cells =
@@ -8988,92 +8441,7 @@ void Code::ClearTypeFeedbackCells(Heap* heap) {
bool Code::allowed_in_shared_map_code_cache() {
return is_keyed_load_stub() || is_keyed_store_stub() ||
- (is_compare_ic_stub() &&
- ICCompareStub::CompareState(stub_info()) == CompareIC::KNOWN_OBJECTS);
-}
-
-
-void Code::MakeCodeAgeSequenceYoung(byte* sequence) {
- PatchPlatformCodeAge(sequence, kNoAge, NO_MARKING_PARITY);
-}
-
-
-void Code::MakeOlder(MarkingParity current_parity) {
- byte* sequence = FindCodeAgeSequence();
- if (sequence != NULL) {
- Age age;
- MarkingParity code_parity;
- GetCodeAgeAndParity(sequence, &age, &code_parity);
- if (age != kLastCodeAge && code_parity != current_parity) {
- PatchPlatformCodeAge(sequence, static_cast<Age>(age + 1),
- current_parity);
- }
- }
-}
-
-
-bool Code::IsOld() {
- byte* sequence = FindCodeAgeSequence();
- if (sequence == NULL) return false;
- Age age;
- MarkingParity parity;
- GetCodeAgeAndParity(sequence, &age, &parity);
- return age >= kSexagenarianCodeAge;
-}
-
-
-byte* Code::FindCodeAgeSequence() {
- return FLAG_age_code &&
- prologue_offset() != kPrologueOffsetNotSet &&
- (kind() == OPTIMIZED_FUNCTION ||
- (kind() == FUNCTION && !has_debug_break_slots()))
- ? instruction_start() + prologue_offset()
- : NULL;
-}
-
-
-void Code::GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity) {
- Isolate* isolate = Isolate::Current();
- Builtins* builtins = isolate->builtins();
- Code* stub = NULL;
-#define HANDLE_CODE_AGE(AGE) \
- stub = *builtins->Make##AGE##CodeYoungAgainEvenMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = EVEN_MARKING_PARITY; \
- return; \
- } \
- stub = *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- if (code == stub) { \
- *age = k##AGE##CodeAge; \
- *parity = ODD_MARKING_PARITY; \
- return; \
- }
- CODE_AGE_LIST(HANDLE_CODE_AGE)
-#undef HANDLE_CODE_AGE
- UNREACHABLE();
-}
-
-
-Code* Code::GetCodeAgeStub(Age age, MarkingParity parity) {
- Isolate* isolate = Isolate::Current();
- Builtins* builtins = isolate->builtins();
- switch (age) {
-#define HANDLE_CODE_AGE(AGE) \
- case k##AGE##CodeAge: { \
- Code* stub = parity == EVEN_MARKING_PARITY \
- ? *builtins->Make##AGE##CodeYoungAgainEvenMarking() \
- : *builtins->Make##AGE##CodeYoungAgainOddMarking(); \
- return stub; \
- }
- CODE_AGE_LIST(HANDLE_CODE_AGE)
-#undef HANDLE_CODE_AGE
- default:
- UNREACHABLE();
- break;
- }
- return NULL;
+ (is_compare_ic_stub() && compare_state() == CompareIC::KNOWN_OBJECTS);
}
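[Editor's note] For reference, the code-age machinery deleted in this hunk encoded a function's age by patching its prologue to call one of a family of "make young again" builtins, the chosen stub encoding both the age and the marking parity. A rough self-contained sketch of that (age, parity) <-> stub mapping, with invented names echoing the kSexagenarianCodeAge constant above:

#include <cassert>
#include <cstdio>

enum Age { kQuadragenarian, kQuinquagenarian, kSexagenarian, kAgeCount };
enum Parity { kEvenParity, kOddParity, kParityCount };

// One builtin "stub id" per (age, parity) pair, mimicking the table that
// the deleted GetCodeAgeAndParity() searched linearly.
int StubFor(Age age, Parity parity) {
  return static_cast<int>(age) * kParityCount + parity;
}

void DecodeStub(int stub, Age* age, Parity* parity) {
  *age = static_cast<Age>(stub / kParityCount);
  *parity = static_cast<Parity>(stub % kParityCount);
}

int main() {
  Age age;
  Parity parity;
  DecodeStub(StubFor(kSexagenarian, kOddParity), &age, &parity);
  assert(age == kSexagenarian && parity == kOddParity);
  std::printf("age=%d parity=%d\n", age, parity);
}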
@@ -9335,15 +8703,11 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "argc = %d\n", arguments_count());
}
if (is_compare_ic_stub()) {
- ASSERT(major_key() == CodeStub::CompareIC);
- CompareIC::State left_state, right_state, handler_state;
- Token::Value op;
- ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
- &handler_state, &op);
- PrintF(out, "compare_state = %s*%s -> %s\n",
- CompareIC::GetStateName(left_state),
- CompareIC::GetStateName(right_state),
- CompareIC::GetStateName(handler_state));
+ CompareIC::State state = CompareIC::ComputeState(this);
+ PrintF(out, "compare_state = %s\n", CompareIC::GetStateName(state));
+ }
+ if (is_compare_ic_stub() && major_key() == CodeStub::CompareIC) {
+ Token::Value op = CompareIC::ComputeOperation(this);
PrintF(out, "compare_operation = %s\n", Token::Name(op));
}
}
@@ -9389,6 +8753,8 @@ void Code::Disassemble(const char* name, FILE* out) {
PrintF(out, "\n");
}
PrintF(out, "\n");
+ // Just print if type feedback info is ever used for optimized code.
+ ASSERT(type_feedback_info()->IsUndefined());
} else if (kind() == FUNCTION) {
unsigned offset = stack_check_table_offset();
    // If there is no stack check table, the "table start" will be at or after
@@ -9430,8 +8796,9 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
// Allocate a new fast elements backing store.
FixedArray* new_elements;
- MaybeObject* maybe = heap->AllocateUninitializedFixedArray(capacity);
- if (!maybe->To(&new_elements)) return maybe;
+ { MaybeObject* maybe = heap->AllocateFixedArrayWithHoles(capacity);
+ if (!maybe->To(&new_elements)) return maybe;
+ }
ElementsKind elements_kind = GetElementsKind();
ElementsKind new_elements_kind;
@@ -9455,10 +8822,10 @@ MaybeObject* JSObject::SetFastElementsCapacityAndLength(
}
FixedArrayBase* old_elements = elements();
ElementsAccessor* accessor = ElementsAccessor::ForKind(elements_kind);
- MaybeObject* maybe_obj =
- accessor->CopyElements(this, new_elements, new_elements_kind);
- if (maybe_obj->IsFailure()) return maybe_obj;
-
+ { MaybeObject* maybe_obj =
+ accessor->CopyElements(this, new_elements, new_elements_kind);
+ if (maybe_obj->IsFailure()) return maybe_obj;
+ }
if (elements_kind != NON_STRICT_ARGUMENTS_ELEMENTS) {
Map* new_map = map();
if (new_elements_kind != elements_kind) {
@@ -9564,53 +8931,7 @@ void JSArray::Expand(int required_size) {
MaybeObject* JSArray::SetElementsLength(Object* len) {
// We should never end in here with a pixel or external array.
ASSERT(AllowsSetElementsLength());
- if (!(FLAG_harmony_observation && map()->is_observed()))
- return GetElementsAccessor()->SetLength(this, len);
-
- Isolate* isolate = GetIsolate();
- HandleScope scope(isolate);
- Handle<JSArray> self(this);
- List<Handle<String> > indices;
- List<Handle<Object> > old_values;
- Handle<Object> old_length_handle(self->length());
- Handle<Object> new_length_handle(len);
- uint32_t old_length = 0;
- CHECK(old_length_handle->ToArrayIndex(&old_length));
- uint32_t new_length = 0;
- if (!new_length_handle->ToArrayIndex(&new_length))
- return Failure::InternalError();
-
- // TODO(adamk): This loop can be very slow for arrays in dictionary mode.
- // Find another way to iterate over arrays with dictionary elements.
- for (uint32_t i = old_length - 1; i + 1 > new_length; --i) {
- PropertyAttributes attributes = self->GetLocalElementAttribute(i);
- if (attributes == ABSENT) continue;
- // A non-configurable property will cause the truncation operation to
- // stop at this index.
- if (attributes == DONT_DELETE) break;
- old_values.Add(
- self->GetLocalElementAccessorPair(i) == NULL
- ? Object::GetElement(self, i)
- : Handle<Object>::cast(isolate->factory()->the_hole_value()));
- indices.Add(isolate->factory()->Uint32ToString(i));
- }
-
- MaybeObject* result =
- self->GetElementsAccessor()->SetLength(*self, *new_length_handle);
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- CHECK(self->length()->ToArrayIndex(&new_length));
- if (old_length != new_length) {
- for (int i = 0; i < indices.length(); ++i) {
- JSObject::EnqueueChangeRecord(
- self, "deleted", indices[i], old_values[i]);
- }
- JSObject::EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_symbol(),
- old_length_handle);
- }
- return *hresult;
+ return GetElementsAccessor()->SetLength(this, len);
}
@@ -9787,51 +9108,203 @@ MaybeObject* JSObject::EnsureCanContainElements(Arguments* args,
}
-PropertyType JSObject::GetLocalPropertyType(String* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementType(index);
+bool JSObject::HasElementWithInterceptor(JSReceiver* receiver, uint32_t index) {
+ Isolate* isolate = GetIsolate();
+ // Make sure that the top context does not change when doing
+ // callbacks or interceptor calls.
+ AssertNoContextChange ncc;
+ HandleScope scope(isolate);
+ Handle<InterceptorInfo> interceptor(GetIndexedInterceptor());
+ Handle<JSReceiver> receiver_handle(receiver);
+ Handle<JSObject> holder_handle(this);
+ CustomArguments args(isolate, interceptor->data(), receiver, this);
+ v8::AccessorInfo info(args.end());
+ if (!interceptor->query()->IsUndefined()) {
+ v8::IndexedPropertyQuery query =
+ v8::ToCData<v8::IndexedPropertyQuery>(interceptor->query());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has", this, index));
+ v8::Handle<v8::Integer> result;
+ {
+ // Leaving JavaScript.
+ VMState state(isolate, EXTERNAL);
+ result = query(index, info);
+ }
+ if (!result.IsEmpty()) {
+ ASSERT(result->IsInt32());
+      return true;  // Absence of the property is signaled by an empty handle.
+ }
+ } else if (!interceptor->getter()->IsUndefined()) {
+ v8::IndexedPropertyGetter getter =
+ v8::ToCData<v8::IndexedPropertyGetter>(interceptor->getter());
+ LOG(isolate,
+ ApiIndexedPropertyAccess("interceptor-indexed-has-get", this, index));
+ v8::Handle<v8::Value> result;
+ {
+ // Leaving JavaScript.
+ VMState state(isolate, EXTERNAL);
+ result = getter(index, info);
+ }
+ if (!result.IsEmpty()) return true;
}
- LookupResult lookup(GetIsolate());
- LocalLookup(name, &lookup, true);
- return lookup.type();
-}
+ if (holder_handle->GetElementsAccessor()->HasElement(
+ *receiver_handle, *holder_handle, index)) {
+ return true;
+ }
-PropertyType JSObject::GetLocalElementType(uint32_t index) {
- return GetElementsAccessor()->GetType(this, this, index);
+ if (holder_handle->IsStringObjectWithCharacterAt(index)) return true;
+ Object* pt = holder_handle->GetPrototype();
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+ receiver, index) != ABSENT;
+ }
+ if (pt->IsNull()) return false;
+ return JSObject::cast(pt)->HasElementWithReceiver(*receiver_handle, index);
}
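[Editor's note] HasElementWithInterceptor first consults the interceptor's query callback, where an empty handle signals absence, and only falls back to the getter when no query is installed. On the embedder side, with the v8 3.14-era API, the callbacks it invokes look roughly like this (a sketch; the even-index rule is an invented example):

#include <v8.h>

using namespace v8;

// Report even indices as present (with no special attributes); an empty
// handle from the query means "absent", as the engine code above expects.
static Handle<Integer> QueryEven(uint32_t index, const AccessorInfo& info) {
  if (index % 2 == 0) return Integer::New(None);
  return Handle<Integer>();
}

static Handle<Value> GetEven(uint32_t index, const AccessorInfo& info) {
  if (index % 2 == 0) return Integer::New(static_cast<int32_t>(index));
  return Handle<Value>();
}

Handle<ObjectTemplate> MakeInterceptedTemplate() {
  HandleScope scope;
  Handle<ObjectTemplate> templ = ObjectTemplate::New();
  templ->SetIndexedPropertyHandler(GetEven, 0, QueryEven);
  return scope.Close(templ);
}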
-AccessorPair* JSObject::GetLocalPropertyAccessorPair(String* name) {
- uint32_t index = 0;
- if (name->AsArrayIndex(&index)) {
- return GetLocalElementAccessorPair(index);
+JSObject::LocalElementType JSObject::HasLocalElement(uint32_t index) {
+ // Check access rights if needed.
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return UNDEFINED_ELEMENT;
+ }
}
- LookupResult lookup(GetIsolate());
- LocalLookupRealNamedProperty(name, &lookup);
+ if (IsJSGlobalProxy()) {
+ Object* proto = GetPrototype();
+ if (proto->IsNull()) return UNDEFINED_ELEMENT;
+ ASSERT(proto->IsJSGlobalObject());
+ return JSObject::cast(proto)->HasLocalElement(index);
+ }
- if (lookup.IsPropertyCallbacks() &&
- lookup.GetCallbackObject()->IsAccessorPair()) {
- return AccessorPair::cast(lookup.GetCallbackObject());
+ // Check for lookup interceptor
+ if (HasIndexedInterceptor()) {
+ return HasElementWithInterceptor(this, index) ? INTERCEPTED_ELEMENT
+ : UNDEFINED_ELEMENT;
}
- return NULL;
+
+ // Handle [] on String objects.
+ if (this->IsStringObjectWithCharacterAt(index)) {
+ return STRING_CHARACTER_ELEMENT;
+ }
+
+ switch (GetElementsKind()) {
+ case FAST_SMI_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedArray::cast(elements())->get(index)->IsTheHole()) {
+ return FAST_ELEMENT;
+ }
+ break;
+ }
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS: {
+ uint32_t length = IsJSArray() ?
+ static_cast<uint32_t>
+ (Smi::cast(JSArray::cast(this)->length())->value()) :
+ static_cast<uint32_t>(FixedDoubleArray::cast(elements())->length());
+ if ((index < length) &&
+ !FixedDoubleArray::cast(elements())->is_the_hole(index)) {
+ return FAST_ELEMENT;
+ }
+ break;
+ }
+ case EXTERNAL_PIXEL_ELEMENTS: {
+ ExternalPixelArray* pixels = ExternalPixelArray::cast(elements());
+ if (index < static_cast<uint32_t>(pixels->length())) return FAST_ELEMENT;
+ break;
+ }
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS: {
+ ExternalArray* array = ExternalArray::cast(elements());
+ if (index < static_cast<uint32_t>(array->length())) return FAST_ELEMENT;
+ break;
+ }
+ case DICTIONARY_ELEMENTS: {
+ if (element_dictionary()->FindEntry(index) !=
+ SeededNumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ break;
+ }
+ case NON_STRICT_ARGUMENTS_ELEMENTS: {
+ // Aliased parameters and non-aliased elements in a fast backing store
+ // behave as FAST_ELEMENT. Non-aliased elements in a dictionary
+ // backing store behave as DICTIONARY_ELEMENT.
+ FixedArray* parameter_map = FixedArray::cast(elements());
+ uint32_t length = parameter_map->length();
+ Object* probe =
+ index < (length - 2) ? parameter_map->get(index + 2) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ // If not aliased, check the arguments.
+ FixedArray* arguments = FixedArray::cast(parameter_map->get(1));
+ if (arguments->IsDictionary()) {
+ SeededNumberDictionary* dictionary =
+ SeededNumberDictionary::cast(arguments);
+ if (dictionary->FindEntry(index) != SeededNumberDictionary::kNotFound) {
+ return DICTIONARY_ELEMENT;
+ }
+ } else {
+ length = arguments->length();
+ probe = (index < length) ? arguments->get(index) : NULL;
+ if (probe != NULL && !probe->IsTheHole()) return FAST_ELEMENT;
+ }
+ break;
+ }
+ }
+
+ return UNDEFINED_ELEMENT;
}
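[Editor's note] Callers of the reinstated HasLocalElement get back a storage classification rather than a boolean, so they dispatch on LocalElementType. A hypothetical caller, assuming V8's internal objects.h declarations shown later in this diff:

// Sketch of a caller dispatching on the storage classification.
static bool ElementIsInFastBackingStore(JSObject* object, uint32_t index) {
  switch (object->HasLocalElement(index)) {
    case JSObject::FAST_ELEMENT:
      return true;
    case JSObject::UNDEFINED_ELEMENT:
    case JSObject::INTERCEPTED_ELEMENT:
    case JSObject::STRING_CHARACTER_ELEMENT:
    case JSObject::DICTIONARY_ELEMENT:
      return false;
  }
  return false;
}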
-AccessorPair* JSObject::GetLocalElementAccessorPair(uint32_t index) {
- if (IsJSGlobalProxy()) {
- Object* proto = GetPrototype();
- if (proto->IsNull()) return NULL;
- ASSERT(proto->IsJSGlobalObject());
- return JSObject::cast(proto)->GetLocalElementAccessorPair(index);
+bool JSObject::HasElementWithReceiver(JSReceiver* receiver, uint32_t index) {
+ // Check access rights if needed.
+ if (IsAccessCheckNeeded()) {
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_HAS)) {
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_HAS);
+ return false;
+ }
+ }
+
+ // Check for lookup interceptor
+ if (HasIndexedInterceptor()) {
+ return HasElementWithInterceptor(receiver, index);
+ }
+
+ ElementsAccessor* accessor = GetElementsAccessor();
+ if (accessor->HasElement(receiver, this, index)) {
+ return true;
}
- // Check for lookup interceptor.
- if (HasIndexedInterceptor()) return NULL;
+ // Handle [] on String objects.
+ if (this->IsStringObjectWithCharacterAt(index)) return true;
- return GetElementsAccessor()->GetAccessorPair(this, this, index);
+ Object* pt = GetPrototype();
+ if (pt->IsNull()) return false;
+ if (pt->IsJSProxy()) {
+ // We need to follow the spec and simulate a call to [[GetOwnProperty]].
+ return JSProxy::cast(pt)->GetElementAttributeWithHandler(
+ receiver, index) != ABSENT;
+ }
+ return JSObject::cast(pt)->HasElementWithReceiver(receiver, index);
}
@@ -10140,7 +9613,7 @@ MaybeObject* JSObject::SetFastElement(uint32_t index,
MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
- Object* value_raw,
+ Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
@@ -10148,23 +9621,24 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
Isolate* isolate = GetIsolate();
Heap* heap = isolate->heap();
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw);
// Insert element in the dictionary.
- Handle<FixedArray> elements(FixedArray::cast(this->elements()));
+ FixedArray* elements = FixedArray::cast(this->elements());
bool is_arguments =
(elements->map() == heap->non_strict_arguments_elements_map());
- Handle<SeededNumberDictionary> dictionary(is_arguments
- ? SeededNumberDictionary::cast(elements->get(1))
- : SeededNumberDictionary::cast(*elements));
+ SeededNumberDictionary* dictionary = NULL;
+ if (is_arguments) {
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = SeededNumberDictionary::cast(elements);
+ }
int entry = dictionary->FindEntry(index);
if (entry != SeededNumberDictionary::kNotFound) {
Object* element = dictionary->ValueAt(entry);
PropertyDetails details = dictionary->DetailsAt(entry);
if (details.type() == CALLBACKS && set_mode == SET_PROPERTY) {
- return SetElementWithCallback(element, index, *value, this, strict_mode);
+ return SetElementWithCallback(element, index, value, this, strict_mode);
} else {
dictionary->UpdateMaxNumberKey(index);
// If a value has not been initialized we allow writing to it even if it
@@ -10193,24 +9667,24 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
Context* context = Context::cast(elements->get(0));
int context_index = entry->aliased_context_slot();
ASSERT(!context->get(context_index)->IsTheHole());
- context->set(context_index, *value);
+ context->set(context_index, value);
// For elements that are still writable we keep slow aliasing.
- if (!details.IsReadOnly()) value = handle(element, isolate);
+ if (!details.IsReadOnly()) value = element;
}
- dictionary->ValueAtPut(entry, *value);
+ dictionary->ValueAtPut(entry, value);
}
} else {
// Index not already used. Look for an accessor in the prototype chain.
- // Can cause GC!
if (check_prototype) {
bool found;
- MaybeObject* result = SetElementWithCallbackSetterInPrototypes(
- index, *value, &found, strict_mode);
+ MaybeObject* result =
+ SetElementWithCallbackSetterInPrototypes(
+ index, value, &found, strict_mode);
if (found) return result;
}
// When we set the is_extensible flag to false we always force the
// element into dictionary mode (and force them to stay there).
- if (!self->map()->is_extensible()) {
+ if (!map()->is_extensible()) {
if (strict_mode == kNonStrictMode) {
return isolate->heap()->undefined_value();
} else {
@@ -10225,31 +9699,30 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
FixedArrayBase* new_dictionary;
PropertyDetails details = PropertyDetails(attributes, NORMAL);
- MaybeObject* maybe = dictionary->AddNumberEntry(index, *value, details);
+ MaybeObject* maybe = dictionary->AddNumberEntry(index, value, details);
if (!maybe->To(&new_dictionary)) return maybe;
- if (*dictionary != SeededNumberDictionary::cast(new_dictionary)) {
+ if (dictionary != SeededNumberDictionary::cast(new_dictionary)) {
if (is_arguments) {
elements->set(1, new_dictionary);
} else {
- self->set_elements(new_dictionary);
+ set_elements(new_dictionary);
}
- dictionary =
- handle(SeededNumberDictionary::cast(new_dictionary), isolate);
+ dictionary = SeededNumberDictionary::cast(new_dictionary);
}
}
// Update the array length if this JSObject is an array.
- if (self->IsJSArray()) {
+ if (IsJSArray()) {
MaybeObject* result =
- JSArray::cast(*self)->JSArrayUpdateLengthFromIndex(index, *value);
+ JSArray::cast(this)->JSArrayUpdateLengthFromIndex(index, value);
if (result->IsFailure()) return result;
}
// Attempt to put this object back in fast case.
- if (self->ShouldConvertToFastElements()) {
+ if (ShouldConvertToFastElements()) {
uint32_t new_length = 0;
- if (self->IsJSArray()) {
- CHECK(JSArray::cast(*self)->length()->ToArrayIndex(&new_length));
+ if (IsJSArray()) {
+ CHECK(JSArray::cast(this)->length()->ToArrayIndex(&new_length));
} else {
new_length = dictionary->max_number_key() + 1;
}
@@ -10258,15 +9731,16 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
: kDontAllowSmiElements;
bool has_smi_only_elements = false;
bool should_convert_to_fast_double_elements =
- self->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
+ ShouldConvertToFastDoubleElements(&has_smi_only_elements);
if (has_smi_only_elements) {
smi_mode = kForceSmiElements;
}
MaybeObject* result = should_convert_to_fast_double_elements
- ? self->SetFastDoubleElementsCapacityAndLength(new_length, new_length)
- : self->SetFastElementsCapacityAndLength(
- new_length, new_length, smi_mode);
- self->ValidateElements();
+ ? SetFastDoubleElementsCapacityAndLength(new_length, new_length)
+ : SetFastElementsCapacityAndLength(new_length,
+ new_length,
+ smi_mode);
+ ValidateElements();
if (result->IsFailure()) return result;
#ifdef DEBUG
if (FLAG_trace_normalization) {
@@ -10275,7 +9749,7 @@ MaybeObject* JSObject::SetDictionaryElement(uint32_t index,
}
#endif
}
- return *value;
+ return value;
}
@@ -10431,27 +9905,28 @@ Handle<Object> JSObject::SetElement(Handle<JSObject> object,
MaybeObject* JSObject::SetElement(uint32_t index,
- Object* value_raw,
+ Object* value,
PropertyAttributes attributes,
StrictModeFlag strict_mode,
bool check_prototype,
SetPropertyMode set_mode) {
- Isolate* isolate = GetIsolate();
-
// Check access rights if needed.
if (IsAccessCheckNeeded()) {
- if (!isolate->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
- isolate->ReportFailedAccessCheck(this, v8::ACCESS_SET);
- return value_raw;
+ Heap* heap = GetHeap();
+ if (!heap->isolate()->MayIndexedAccess(this, index, v8::ACCESS_SET)) {
+ HandleScope scope(heap->isolate());
+ Handle<Object> value_handle(value);
+ heap->isolate()->ReportFailedAccessCheck(this, v8::ACCESS_SET);
+ return *value_handle;
}
}
if (IsJSGlobalProxy()) {
Object* proto = GetPrototype();
- if (proto->IsNull()) return value_raw;
+ if (proto->IsNull()) return value;
ASSERT(proto->IsJSGlobalObject());
return JSObject::cast(proto)->SetElement(index,
- value_raw,
+ value,
attributes,
strict_mode,
check_prototype,
@@ -10460,8 +9935,10 @@ MaybeObject* JSObject::SetElement(uint32_t index,
// Don't allow element properties to be redefined for external arrays.
if (HasExternalArrayElements() && set_mode == DEFINE_PROPERTY) {
+ Isolate* isolate = GetHeap()->isolate();
+ Handle<Object> receiver(this);
Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
- Handle<Object> args[] = { handle(this, isolate), number };
+ Handle<Object> args[] = { receiver, number };
Handle<Object> error = isolate->factory()->NewTypeError(
"redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
return isolate->Throw(*error);
@@ -10476,55 +9953,22 @@ MaybeObject* JSObject::SetElement(uint32_t index,
dictionary->set_requires_slow_elements();
}
- if (!(FLAG_harmony_observation && map()->is_observed())) {
- return HasIndexedInterceptor()
- ? SetElementWithInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode)
- : SetElementWithoutInterceptor(
- index, value_raw, attributes, strict_mode, check_prototype, set_mode);
- }
-
- // From here on, everything has to be handlified.
- Handle<JSObject> self(this);
- Handle<Object> value(value_raw);
- PropertyAttributes old_attributes = self->GetLocalElementAttribute(index);
- Handle<Object> old_value = isolate->factory()->the_hole_value();
- Handle<Object> old_length;
-
- if (old_attributes != ABSENT) {
- if (self->GetLocalElementAccessorPair(index) == NULL)
- old_value = Object::GetElement(self, index);
- } else if (self->IsJSArray()) {
- // Store old array length in case adding an element grows the array.
- old_length = handle(Handle<JSArray>::cast(self)->length(), isolate);
- }
-
// Check for lookup interceptor
- MaybeObject* result = self->HasIndexedInterceptor()
- ? self->SetElementWithInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode)
- : self->SetElementWithoutInterceptor(
- index, *value, attributes, strict_mode, check_prototype, set_mode);
-
- Handle<Object> hresult;
- if (!result->ToHandle(&hresult, isolate)) return result;
-
- Handle<String> name = isolate->factory()->Uint32ToString(index);
- PropertyAttributes new_attributes = self->GetLocalElementAttribute(index);
- if (old_attributes == ABSENT) {
- EnqueueChangeRecord(self, "new", name, old_value);
- if (self->IsJSArray() &&
- !old_length->SameValue(Handle<JSArray>::cast(self)->length())) {
- EnqueueChangeRecord(
- self, "updated", isolate->factory()->length_symbol(), old_length);
- }
- } else if (old_attributes != new_attributes || old_value->IsTheHole()) {
- EnqueueChangeRecord(self, "reconfigured", name, old_value);
- } else if (!old_value->SameValue(*Object::GetElement(self, index))) {
- EnqueueChangeRecord(self, "updated", name, old_value);
+ if (HasIndexedInterceptor()) {
+ return SetElementWithInterceptor(index,
+ value,
+ attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
- return *hresult;
+ return SetElementWithoutInterceptor(index,
+ value,
+ attributes,
+ strict_mode,
+ check_prototype,
+ set_mode);
}
@@ -10640,8 +10084,6 @@ MaybeObject* JSObject::TransitionElementsKind(ElementsKind to_kind) {
to_kind = GetHoleyElementsKind(to_kind);
}
- if (from_kind == to_kind) return this;
-
Isolate* isolate = GetIsolate();
if (elements() == isolate->heap()->empty_fixed_array() ||
(IsFastSmiOrObjectElementsKind(from_kind) &&
@@ -11743,10 +11185,11 @@ class AsciiSymbolKey : public SequentialSymbolKey<char> {
class SubStringAsciiSymbolKey : public HashTableKey {
public:
- explicit SubStringAsciiSymbolKey(Handle<SeqOneByteString> string,
+ explicit SubStringAsciiSymbolKey(Handle<SeqAsciiString> string,
int from,
- int length)
- : string_(string), from_(from), length_(length) { }
+ int length,
+ uint32_t seed)
+ : string_(string), from_(from), length_(length), seed_(seed) { }
uint32_t Hash() {
ASSERT(length_ >= 0);
@@ -11763,7 +11206,7 @@ class SubStringAsciiSymbolKey : public HashTableKey {
// chance this is an array index.
while (i < length_ && hasher.is_array_index()) {
hasher.AddCharacter(static_cast<uc32>(
- string_->SeqOneByteStringGet(i + from_)));
+ string_->SeqAsciiStringGet(i + from_)));
i++;
}
@@ -11771,7 +11214,7 @@ class SubStringAsciiSymbolKey : public HashTableKey {
// index.
while (i < length_) {
hasher.AddCharacterNoIndex(static_cast<uc32>(
- string_->SeqOneByteStringGet(i + from_)));
+ string_->SeqAsciiStringGet(i + from_)));
i++;
}
hash_field_ = hasher.GetHashField();
@@ -11799,10 +11242,11 @@ class SubStringAsciiSymbolKey : public HashTableKey {
}
private:
- Handle<SeqOneByteString> string_;
+ Handle<SeqAsciiString> string_;
int from_;
int length_;
uint32_t hash_field_;
+ uint32_t seed_;
};
@@ -12723,12 +12167,11 @@ MaybeObject* SymbolTable::LookupAsciiSymbol(Vector<const char> str,
}
-MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(
- Handle<SeqOneByteString> str,
- int from,
- int length,
- Object** s) {
- SubStringAsciiSymbolKey key(str, from, length);
+MaybeObject* SymbolTable::LookupSubStringAsciiSymbol(Handle<SeqAsciiString> str,
+ int from,
+ int length,
+ Object** s) {
+ SubStringAsciiSymbolKey key(str, from, length, GetHeap()->HashSeed());
return LookupKey(&key, s);
}
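[Editor's note] The key now threads the heap's hash seed through to the hasher, which uses it as the initial running hash so symbol hashes vary with the seed. A simplified standalone sketch in the spirit of V8's StringHasher (the array-index fast path and zero-hash fixup are omitted, and the seed constant below is illustrative):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Seeded one-at-a-time hash in the spirit of V8's StringHasher.
uint32_t SeededHash(const char* chars, int length, uint32_t seed) {
  uint32_t hash = seed;  // The heap's HashSeed() primes the running hash.
  for (int i = 0; i < length; ++i) {
    hash += static_cast<uint8_t>(chars[i]);
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  return hash;
}

int main() {
  const char* s = "length";
  std::printf("%08x\n",
              SeededHash(s, static_cast<int>(std::strlen(s)), 0x5eed));
}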
@@ -13416,7 +12859,8 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
PropertyType type = DetailsAt(i).type();
ASSERT(type != FIELD);
instance_descriptor_length++;
- if (type == NORMAL && !value->IsJSFunction()) {
+ if (type == NORMAL &&
+ (!value->IsJSFunction() || heap->InNewSpace(value))) {
number_of_fields += 1;
}
}
@@ -13481,7 +12925,7 @@ MaybeObject* StringDictionary::TransformPropertiesToFastFor(
int enumeration_index = details.descriptor_index();
PropertyType type = details.type();
- if (value->IsJSFunction()) {
+ if (value->IsJSFunction() && !heap->InNewSpace(value)) {
ConstantFunctionDescriptor d(key,
JSFunction::cast(value),
details.attributes(),
diff --git a/deps/v8/src/objects.h b/deps/v8/src/objects.h
index 0be263a48..0d1a69cb9 100644
--- a/deps/v8/src/objects.h
+++ b/deps/v8/src/objects.h
@@ -97,7 +97,7 @@
// - ExternalFloatArray
// - String
// - SeqString
-// - SeqOneByteString
+// - SeqAsciiString
// - SeqTwoByteString
// - SlicedString
// - ConsString
@@ -194,18 +194,6 @@ enum DescriptorFlag {
OWN_DESCRIPTORS
};
-// The GC maintains a bit of information, the MarkingParity, which toggles
-// from odd to even and back every time marking is completed. Incremental
-// marking can visit an object twice during a marking phase, so algorithms
-// that piggy-back on marking can use the parity to ensure that they only
-// perform an operation on an object once per marking phase: they record the
-// MarkingParity when they visit an object, and only re-visit the object when it
-// is marked again and the MarkingParity changes.
-enum MarkingParity {
- NO_MARKING_PARITY,
- ODD_MARKING_PARITY,
- EVEN_MARKING_PARITY
-};
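[Editor's note] The comment deleted above describes the parity trick: incremental marking can visit an object twice in one phase, so an algorithm piggy-backing on marking records the parity under which it last acted and acts again only once the parity flips. A minimal sketch of that once-per-phase guard:

enum MarkingParity { ODD_MARKING_PARITY, EVEN_MARKING_PARITY };

struct PiggyBackState {
  MarkingParity last_acted_under;
};

// Returns true at most once per marking phase, no matter how many times
// incremental marking revisits the object within that phase.
bool ShouldActOnVisit(PiggyBackState* state, MarkingParity current) {
  if (state->last_acted_under == current) return false;
  state->last_acted_under = current;
  return true;
}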
// Instance size sentinel for objects of variable size.
const int kVariableSizeSentinel = 0;
@@ -479,7 +467,7 @@ const uint32_t kSymbolTag = 0x40;
// two-byte characters or one-byte characters.
const uint32_t kStringEncodingMask = 0x4;
const uint32_t kTwoByteStringTag = 0x0;
-const uint32_t kOneByteStringTag = 0x4;
+const uint32_t kAsciiStringTag = 0x4;
// If bit 7 is clear, the low-order 2 bits indicate the representation
// of the string.
@@ -530,46 +518,39 @@ const uint32_t kShortcutTypeTag = kConsStringTag;
enum InstanceType {
// String types.
SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kSeqStringTag,
- ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSymbolTag |
- kSeqStringTag,
+ ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kSeqStringTag,
CONS_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kConsStringTag,
- CONS_ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSymbolTag |
- kConsStringTag,
+ CONS_ASCII_SYMBOL_TYPE = kAsciiStringTag | kSymbolTag | kConsStringTag,
SHORT_EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag |
kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
- SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kOneByteStringTag | kAsciiDataHintTag |
- kExternalStringTag | kSymbolTag |
- kShortExternalStringTag,
+ SHORT_EXTERNAL_ASCII_SYMBOL_TYPE = kAsciiStringTag | kExternalStringTag |
+ kSymbolTag | kShortExternalStringTag,
EXTERNAL_SYMBOL_TYPE = kTwoByteStringTag | kSymbolTag | kExternalStringTag,
EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kSymbolTag | kExternalStringTag | kAsciiDataHintTag,
EXTERNAL_ASCII_SYMBOL_TYPE =
- kOneByteStringTag | kAsciiDataHintTag | kSymbolTag | kExternalStringTag,
+ kAsciiStringTag | kSymbolTag | kExternalStringTag,
STRING_TYPE = kTwoByteStringTag | kSeqStringTag,
- ASCII_STRING_TYPE = kOneByteStringTag | kAsciiDataHintTag | kSeqStringTag,
+ ASCII_STRING_TYPE = kAsciiStringTag | kSeqStringTag,
CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag,
- CONS_ASCII_STRING_TYPE =
- kOneByteStringTag | kAsciiDataHintTag | kConsStringTag,
+ CONS_ASCII_STRING_TYPE = kAsciiStringTag | kConsStringTag,
SLICED_STRING_TYPE = kTwoByteStringTag | kSlicedStringTag,
- SLICED_ASCII_STRING_TYPE =
- kOneByteStringTag | kAsciiDataHintTag | kSlicedStringTag,
+ SLICED_ASCII_STRING_TYPE = kAsciiStringTag | kSlicedStringTag,
SHORT_EXTERNAL_STRING_TYPE =
kTwoByteStringTag | kExternalStringTag | kShortExternalStringTag,
SHORT_EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag |
kAsciiDataHintTag | kShortExternalStringTag,
SHORT_EXTERNAL_ASCII_STRING_TYPE =
- kOneByteStringTag | kAsciiDataHintTag |
- kExternalStringTag | kShortExternalStringTag,
+ kAsciiStringTag | kExternalStringTag | kShortExternalStringTag,
EXTERNAL_STRING_TYPE = kTwoByteStringTag | kExternalStringTag,
EXTERNAL_STRING_WITH_ASCII_DATA_TYPE =
kTwoByteStringTag | kExternalStringTag | kAsciiDataHintTag,
// LAST_STRING_TYPE
- EXTERNAL_ASCII_STRING_TYPE =
- kOneByteStringTag | kAsciiDataHintTag | kExternalStringTag,
+ EXTERNAL_ASCII_STRING_TYPE = kAsciiStringTag | kExternalStringTag,
PRIVATE_EXTERNAL_ASCII_STRING_TYPE = EXTERNAL_ASCII_STRING_TYPE,
// Objects allocated in their own spaces (never in new space).
@@ -773,9 +754,7 @@ class MaybeObject BASE_EMBEDDED {
return reinterpret_cast<Failure*>(this);
}
inline Object* ToObjectUnchecked() {
- // TODO(jkummerow): Turn this back into an ASSERT when we can be certain
- // that it never fires in Release mode in the wild.
- CHECK(!IsFailure());
+ ASSERT(!IsFailure());
return reinterpret_cast<Object*>(this);
}
inline Object* ToObjectChecked() {
@@ -790,13 +769,6 @@ class MaybeObject BASE_EMBEDDED {
return true;
}
- template<typename T>
- inline bool ToHandle(Handle<T>* obj, Isolate* isolate) {
- if (IsFailure()) return false;
- *obj = handle(T::cast(reinterpret_cast<Object*>(this)), isolate);
- return true;
- }
-
#ifdef OBJECT_PRINT
// Prints this object with details.
inline void Print() {
@@ -831,7 +803,7 @@ class MaybeObject BASE_EMBEDDED {
V(ExternalTwoByteString) \
V(ExternalAsciiString) \
V(SeqTwoByteString) \
- V(SeqOneByteString) \
+ V(SeqAsciiString) \
\
V(ExternalArray) \
V(ExternalByteArray) \
@@ -894,7 +866,6 @@ class MaybeObject BASE_EMBEDDED {
V(UndetectableObject) \
V(AccessCheckNeeded) \
V(JSGlobalPropertyCell) \
- V(ObjectHashTable) \
class JSReceiver;
@@ -916,7 +887,6 @@ class Object : public MaybeObject {
#undef IS_TYPE_FUNCTION_DECL
inline bool IsFixedArrayBase();
- inline bool IsExternal();
// Returns true if this object is an instance of the specified
// function template.
@@ -975,7 +945,6 @@ class Object : public MaybeObject {
String* key,
PropertyAttributes* attributes);
- static Handle<Object> GetProperty(Handle<Object> object, Handle<String> key);
static Handle<Object> GetProperty(Handle<Object> object,
Handle<Object> receiver,
LookupResult* result,
@@ -1493,14 +1462,10 @@ class JSReceiver: public HeapObject {
String* name);
PropertyAttributes GetLocalPropertyAttribute(String* name);
- inline PropertyAttributes GetElementAttribute(uint32_t index);
- inline PropertyAttributes GetLocalElementAttribute(uint32_t index);
-
// Can cause a GC.
inline bool HasProperty(String* name);
inline bool HasLocalProperty(String* name);
inline bool HasElement(uint32_t index);
- inline bool HasLocalElement(uint32_t index);
// Return the object's prototype (might be Heap::null_value()).
inline Object* GetPrototype();
@@ -1518,18 +1483,17 @@ class JSReceiver: public HeapObject {
// Lookup a property. If found, the result is valid and has
// detailed information.
- void LocalLookup(String* name, LookupResult* result,
- bool search_hidden_prototypes = false);
+ void LocalLookup(String* name, LookupResult* result);
void Lookup(String* name, LookupResult* result);
protected:
Smi* GenerateIdentityHash();
private:
- PropertyAttributes GetPropertyAttributeForResult(JSReceiver* receiver,
- LookupResult* result,
- String* name,
- bool continue_search);
+ PropertyAttributes GetPropertyAttribute(JSReceiver* receiver,
+ LookupResult* result,
+ String* name,
+ bool continue_search);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
};
@@ -1578,8 +1542,6 @@ class JSObject: public JSReceiver {
// Returns true if an object has elements of FAST_ELEMENTS or
// FAST_SMI_ONLY_ELEMENTS.
inline bool HasFastSmiOrObjectElements();
- // Returns true if an object has any of the fast elements kinds.
- inline bool HasFastElements();
// Returns true if an object has elements of FAST_DOUBLE_ELEMENTS
// ElementsKind.
inline bool HasFastDoubleElements();
@@ -1718,16 +1680,12 @@ class JSObject: public JSReceiver {
LookupResult* result,
String* name,
bool continue_search);
- PropertyAttributes GetElementAttributeWithReceiver(JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
static void DefineAccessor(Handle<JSObject> object,
Handle<String> name,
Handle<Object> getter,
Handle<Object> setter,
PropertyAttributes attributes);
- // Can cause GC.
MUST_USE_RESULT MaybeObject* DefineAccessor(String* name,
Object* getter,
Object* setter,
@@ -1803,7 +1761,6 @@ class JSObject: public JSReceiver {
static Handle<Object> DeleteProperty(Handle<JSObject> obj,
Handle<String> name);
- // Can cause GC.
MUST_USE_RESULT MaybeObject* DeleteProperty(String* name, DeleteMode mode);
static Handle<Object> DeleteElement(Handle<JSObject> obj, uint32_t index);
@@ -1842,18 +1799,36 @@ class JSObject: public JSReceiver {
// be represented as a double and not a Smi.
bool ShouldConvertToFastDoubleElements(bool* has_smi_only_elements);
+ // Tells whether the index'th element is present.
+ bool HasElementWithReceiver(JSReceiver* receiver, uint32_t index);
+
// Computes the new capacity when expanding the elements of a JSObject.
static int NewElementsCapacity(int old_capacity) {
// (old_capacity + 50%) + 16
return old_capacity + (old_capacity >> 1) + 16;
}
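[Editor's note] The growth formula is (old_capacity + 50%) + 16, so capacities starting from an empty backing store run 16, 40, 76, 130, 211, ... A quick standalone check:

#include <cstdio>

static int NewElementsCapacity(int old_capacity) {
  // (old_capacity + 50%) + 16, as in JSObject::NewElementsCapacity above.
  return old_capacity + (old_capacity >> 1) + 16;
}

int main() {
  int capacity = 0;
  for (int i = 0; i < 5; ++i) {
    capacity = NewElementsCapacity(capacity);
    std::printf("%d ", capacity);  // Prints: 16 40 76 130 211
  }
  std::printf("\n");
}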
- PropertyType GetLocalPropertyType(String* name);
- PropertyType GetLocalElementType(uint32_t index);
+ // Tells whether the index'th element is present and how it is stored.
+ enum LocalElementType {
+ // There is no element with given index.
+ UNDEFINED_ELEMENT,
- // These methods do not perform access checks!
- AccessorPair* GetLocalPropertyAccessorPair(String* name);
- AccessorPair* GetLocalElementAccessorPair(uint32_t index);
+ // Element with given index is handled by interceptor.
+ INTERCEPTED_ELEMENT,
+
+ // Element with given index is character in string.
+ STRING_CHARACTER_ELEMENT,
+
+ // Element with given index is stored in fast backing store.
+ FAST_ELEMENT,
+
+ // Element with given index is stored in slow backing store.
+ DICTIONARY_ELEMENT
+ };
+
+ LocalElementType HasLocalElement(uint32_t index);
+
+ bool HasElementWithInterceptor(JSReceiver* receiver, uint32_t index);
MUST_USE_RESULT MaybeObject* SetFastElement(uint32_t index,
Object* value,
@@ -1880,7 +1855,7 @@ class JSObject: public JSReceiver {
StrictModeFlag strict_mode);
// Empty handle is returned if the element cannot be set to the given value.
- static Handle<Object> SetElement(
+ static MUST_USE_RESULT Handle<Object> SetElement(
Handle<JSObject> object,
uint32_t index,
Handle<Object> value,
@@ -2033,7 +2008,7 @@ class JSObject: public JSReceiver {
Object* value,
PropertyAttributes attributes);
- // Add a property to an object. May cause GC.
+ // Add a property to an object.
MUST_USE_RESULT MaybeObject* AddProperty(
String* name,
Object* value,
@@ -2204,15 +2179,6 @@ class JSObject: public JSReceiver {
static inline int SizeOf(Map* map, HeapObject* object);
};
- // Enqueue change record for Object.observe. May cause GC.
- static void EnqueueChangeRecord(Handle<JSObject> object,
- const char* type,
- Handle<String> name,
- Handle<Object> old_value);
-
- // Deliver change records to observers. May cause GC.
- static void DeliverChangeRecords(Isolate* isolate);
-
private:
friend class DictionaryElementsAccessor;
@@ -2220,14 +2186,6 @@ class JSObject: public JSReceiver {
Object* structure,
uint32_t index,
Object* holder);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithInterceptor(
- JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
- MUST_USE_RESULT PropertyAttributes GetElementAttributeWithoutInterceptor(
- JSReceiver* receiver,
- uint32_t index,
- bool continue_search);
MUST_USE_RESULT MaybeObject* SetElementWithCallback(
Object* structure,
uint32_t index,
@@ -2372,12 +2330,12 @@ class FixedArray: public FixedArrayBase {
inline void set_unchecked(Heap* heap, int index, Object* value,
WriteBarrierMode mode);
- inline Object** GetFirstElementAddress();
- inline bool ContainsOnlySmisOrHoles();
-
// Gives access to raw memory which stores the array's data.
inline Object** data_start();
+ inline Object** GetFirstElementAddress();
+ inline bool ContainsOnlySmisOrHoles();
+
// Copy operations.
MUST_USE_RESULT inline MaybeObject* Copy();
MUST_USE_RESULT MaybeObject* CopySize(int new_length);
@@ -2452,8 +2410,6 @@ class FixedArray: public FixedArrayBase {
Object* value);
private:
- STATIC_CHECK(kHeaderSize == Internals::kFixedArrayHeaderSize);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(FixedArray);
};
@@ -2479,9 +2435,6 @@ class FixedDoubleArray: public FixedArrayBase {
return kHeaderSize + length * kDoubleSize;
}
- // Gives access to raw memory which stores the array's data.
- inline double* data_start();
-
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
@@ -3023,7 +2976,7 @@ class SymbolTableShape : public BaseShape<HashTableKey*> {
static const int kEntrySize = 1;
};
-class SeqOneByteString;
+class SeqAsciiString;
// SymbolTable.
//
@@ -3039,7 +2992,7 @@ class SymbolTable: public HashTable<SymbolTableShape, HashTableKey*> {
MUST_USE_RESULT MaybeObject* LookupAsciiSymbol(Vector<const char> str,
Object** s);
MUST_USE_RESULT MaybeObject* LookupSubStringAsciiSymbol(
- Handle<SeqOneByteString> str,
+ Handle<SeqAsciiString> str,
int from,
int length,
Object** s);
@@ -4309,12 +4262,8 @@ class Code: public HeapObject {
DECL_ACCESSORS(deoptimization_data, FixedArray)
// [type_feedback_info]: Struct containing type feedback information.
- // STUBs can use this slot to store arbitrary information as a Smi.
- // Will contain either a TypeFeedbackInfo object, or undefined, or a Smi.
+ // Will contain either a TypeFeedbackInfo object, or undefined.
DECL_ACCESSORS(type_feedback_info, Object)
- inline void InitializeTypeFeedbackInfoNoWriteBarrier(Object* value);
- inline int stub_info();
- inline void set_stub_info(int info);
// [gc_metadata]: Field used to hold GC related metadata. The contents of this
// field does not have to be traced during garbage collection since
@@ -4326,11 +4275,6 @@ class Code: public HeapObject {
inline void set_ic_age(int count);
inline int ic_age();
- // [prologue_offset]: Offset of the function prologue, used for aging
- // FUNCTIONs and OPTIMIZED_FUNCTIONs.
- inline int prologue_offset();
- inline void set_prologue_offset(int offset);
-
// Unchecked accessors to be used during GC.
inline ByteArray* unchecked_relocation_info();
inline FixedArray* unchecked_deoptimization_data();
@@ -4425,6 +4369,21 @@ class Code: public HeapObject {
inline byte unary_op_type();
inline void set_unary_op_type(byte value);
+ // [type-recording binary op type]: For kind BINARY_OP_IC.
+ inline byte binary_op_type();
+ inline void set_binary_op_type(byte value);
+ inline byte binary_op_result_type();
+ inline void set_binary_op_result_type(byte value);
+
+ // [compare state]: For kind COMPARE_IC, tells what state the stub is in.
+ inline byte compare_state();
+ inline void set_compare_state(byte value);
+
+ // [compare_operation]: For kind COMPARE_IC tells what compare operation the
+ // stub was generated for.
+ inline byte compare_operation();
+ inline void set_compare_operation(byte value);
+
// [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
inline byte to_boolean_state();
inline void set_to_boolean_state(byte value);
@@ -4563,22 +4522,6 @@ class Code: public HeapObject {
void ClearInlineCaches();
void ClearTypeFeedbackCells(Heap* heap);
-#define DECLARE_CODE_AGE_ENUM(X) k##X##CodeAge,
- enum Age {
- kNoAge = 0,
- CODE_AGE_LIST(DECLARE_CODE_AGE_ENUM)
- kAfterLastCodeAge,
- kLastCodeAge = kAfterLastCodeAge - 1,
- kCodeAgeCount = kAfterLastCodeAge - 1
- };
-#undef DECLARE_CODE_AGE_ENUM
-
- // Code aging
- static void MakeCodeAgeSequenceYoung(byte* sequence);
- void MakeOlder(MarkingParity);
- static bool IsYoungSequence(byte* sequence);
- bool IsOld();
-
  // Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
@@ -4598,10 +4541,8 @@ class Code: public HeapObject {
static const int kKindSpecificFlags1Offset = kFlagsOffset + kIntSize;
static const int kKindSpecificFlags2Offset =
kKindSpecificFlags1Offset + kIntSize;
- // Note: We might be able to squeeze this into the flags above.
- static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
- static const int kHeaderPaddingStart = kPrologueOffset + kIntSize;
+ static const int kHeaderPaddingStart = kKindSpecificFlags2Offset + kIntSize;
// Add padding to align the instruction start following right after
// the Code object header.
@@ -4635,6 +4576,18 @@ class Code: public HeapObject {
static const int kUnaryOpTypeFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kUnaryOpTypeBitCount = 3;
+ static const int kBinaryOpTypeFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kBinaryOpTypeBitCount = 3;
+ static const int kBinaryOpResultTypeFirstBit =
+ kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount;
+ static const int kBinaryOpResultTypeBitCount = 3;
+ static const int kCompareStateFirstBit =
+ kStackSlotsFirstBit + kStackSlotsBitCount;
+ static const int kCompareStateBitCount = 3;
+ static const int kCompareOperationFirstBit =
+ kCompareStateFirstBit + kCompareStateBitCount;
+ static const int kCompareOperationBitCount = 4;
static const int kToBooleanStateFirstBit =
kStackSlotsFirstBit + kStackSlotsBitCount;
static const int kToBooleanStateBitCount = 8;
@@ -4644,6 +4597,11 @@ class Code: public HeapObject {
STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
STATIC_ASSERT(kUnaryOpTypeFirstBit + kUnaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpTypeFirstBit + kBinaryOpTypeBitCount <= 32);
+ STATIC_ASSERT(kBinaryOpResultTypeFirstBit +
+ kBinaryOpResultTypeBitCount <= 32);
+ STATIC_ASSERT(kCompareStateFirstBit + kCompareStateBitCount <= 32);
+ STATIC_ASSERT(kCompareOperationFirstBit + kCompareOperationBitCount <= 32);
STATIC_ASSERT(kToBooleanStateFirstBit + kToBooleanStateBitCount <= 32);
STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
@@ -4651,6 +4609,14 @@ class Code: public HeapObject {
kStackSlotsFirstBit, kStackSlotsBitCount> {}; // NOLINT
class UnaryOpTypeField: public BitField<int,
kUnaryOpTypeFirstBit, kUnaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpTypeField: public BitField<int,
+ kBinaryOpTypeFirstBit, kBinaryOpTypeBitCount> {}; // NOLINT
+ class BinaryOpResultTypeField: public BitField<int,
+ kBinaryOpResultTypeFirstBit, kBinaryOpResultTypeBitCount> {}; // NOLINT
+ class CompareStateField: public BitField<int,
+ kCompareStateFirstBit, kCompareStateBitCount> {}; // NOLINT
+ class CompareOperationField: public BitField<int,
+ kCompareOperationFirstBit, kCompareOperationBitCount> {}; // NOLINT
class ToBooleanStateField: public BitField<int,
kToBooleanStateFirstBit, kToBooleanStateBitCount> {}; // NOLINT
class HasFunctionCacheField: public BitField<bool,
@@ -4684,20 +4650,6 @@ class Code: public HeapObject {
TypeField::kMask | CacheHolderField::kMask;
private:
- friend class RelocIterator;
-
- // Code aging
- byte* FindCodeAgeSequence();
- static void GetCodeAgeAndParity(Code* code, Age* age,
- MarkingParity* parity);
- static void GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity);
- static Code* GetCodeAgeStub(Age age, MarkingParity parity);
-
- // Code aging -- platform-specific
- static void PatchPlatformCodeAge(byte* sequence, Age age,
- MarkingParity parity);
-
DISALLOW_IMPLICIT_CONSTRUCTORS(Code);
};
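[Editor's note] The kind-specific flag fields above are packed into one 32-bit word via V8's BitField template, and the STATIC_ASSERTs guarantee each field fits. A self-contained miniature of the same encode/decode pattern (field positions below are illustrative; V8's real template lives in utils.h):

#include <cassert>
#include <cstdint>

// Miniature of V8's BitField<T, shift, size> helper.
template <class T, int shift, int size>
struct BitField {
  static const uint32_t kMask = ((1u << size) - 1u) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
};

// Two adjacent fields, laid out like kCompareStateFirstBit /
// kCompareOperationFirstBit above (positions here are invented).
typedef BitField<int, 0, 3> CompareState;      // 3 bits.
typedef BitField<int, 3, 4> CompareOperation;  // Next 4 bits.

int main() {
  uint32_t flags = CompareState::encode(5) | CompareOperation::encode(9);
  assert(CompareState::decode(flags) == 5);
  assert(CompareOperation::decode(flags) == 9);
}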
@@ -4749,7 +4701,6 @@ class Map: public HeapObject {
class FunctionWithPrototype: public BitField<bool, 23, 1> {};
class DictionaryMap: public BitField<bool, 24, 1> {};
class OwnsDescriptors: public BitField<bool, 25, 1> {};
- class IsObserved: public BitField<bool, 26, 1> {};
// Tells whether the object in the prototype property will be used
// for instances created from this function. If the prototype
@@ -5017,8 +4968,6 @@ class Map: public HeapObject {
inline bool owns_descriptors();
inline void set_owns_descriptors(bool is_shared);
- inline bool is_observed();
- inline void set_is_observed(bool is_observed);
MUST_USE_RESULT MaybeObject* RawCopy(int instance_size);
MUST_USE_RESULT MaybeObject* CopyWithPreallocatedFieldDescriptors();
@@ -5429,7 +5378,6 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code.
DECL_ACCESSORS(code, Code)
- inline void ReplaceCode(Code* code);
// [optimized_code_map]: Map from native context to optimized code
// and a shared literals array or Smi 0 if none.
@@ -5445,7 +5393,7 @@ class SharedFunctionInfo: public HeapObject {
void InstallFromOptimizedCodeMap(JSFunction* function, int index);
// Clear optimized code map.
- inline void ClearOptimizedCodeMap();
+ void ClearOptimizedCodeMap();
// Add a new entry to the optimized code map.
static void AddToOptimizedCodeMap(Handle<SharedFunctionInfo> shared,
@@ -6163,6 +6111,8 @@ class JSFunction: public JSObject {
// The initial map for an object created by this constructor.
inline Map* initial_map();
inline void set_initial_map(Map* value);
+ MUST_USE_RESULT inline MaybeObject* set_initial_map_and_cache_transitions(
+ Map* value);
inline bool has_initial_map();
// Get and set the prototype property on a JSFunction. If the
@@ -6325,7 +6275,7 @@ class GlobalObject: public JSObject {
Handle<GlobalObject> global,
Handle<String> name);
// TODO(kmillikin): This function can be eliminated once the stub cache is
- // fully handlified (and the static helper can be written directly).
+  // fully handlified (and the static helper can be written directly).
MUST_USE_RESULT MaybeObject* EnsurePropertyCell(String* name);
// Casting.
@@ -7170,8 +7120,6 @@ class StringShape BASE_EMBEDDED {
// All string values have a length field.
class String: public HeapObject {
public:
- enum Encoding { ONE_BYTE_ENCODING, TWO_BYTE_ENCODING };
-
// Representation of the flat content of a String.
// A non-flat string doesn't have flat content.
// A flat string has content that's encoded as a sequence of either
@@ -7229,13 +7177,13 @@ class String: public HeapObject {
// be ASCII encoded. This might be the case even if the string is
// two-byte. Such strings may appear when the embedder prefers
// two-byte external representations even for ASCII data.
- inline bool IsOneByteRepresentation();
+ inline bool IsAsciiRepresentation();
inline bool IsTwoByteRepresentation();
// Cons and slices have an encoding flag that may not represent the actual
// encoding of the underlying string. This is taken into account here.
// Requires: this->IsFlat()
- inline bool IsOneByteRepresentationUnderneath();
+ inline bool IsAsciiRepresentationUnderneath();
inline bool IsTwoByteRepresentationUnderneath();
// NOTE: this should be considered only a hint. False negatives are
@@ -7510,14 +7458,6 @@ class String: public HeapObject {
return NonAsciiStart(chars, length) >= length;
}
- template<class Visitor, class ConsOp>
- static inline void Visit(String* string,
- unsigned offset,
- Visitor& visitor,
- ConsOp& consOp,
- int32_t type,
- unsigned length);
-
protected:
class ReadBlockBuffer {
public:
@@ -7576,11 +7516,6 @@ class SeqString: public String {
// Layout description.
static const int kHeaderSize = String::kSize;
- // Truncate the string in-place if possible and return the result.
- // In case of new_length == 0, the empty string is returned without
- // truncating the original string.
- MUST_USE_RESULT String* Truncate(int new_length);
-
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(SeqString);
};
@@ -7588,13 +7523,13 @@ class SeqString: public String {
// The AsciiString class captures sequential ASCII string objects.
// Each character in the AsciiString is an ASCII character.
-class SeqOneByteString: public SeqString {
+class SeqAsciiString: public SeqString {
public:
static const bool kHasAsciiEncoding = true;
// Dispatched behavior.
- inline uint16_t SeqOneByteStringGet(int index);
- inline void SeqOneByteStringSet(int index, uint16_t value);
+ inline uint16_t SeqAsciiStringGet(int index);
+ inline void SeqAsciiStringSet(int index, uint16_t value);
// Get the address of the characters in this string.
inline Address GetCharsAddress();
@@ -7602,12 +7537,12 @@ class SeqOneByteString: public SeqString {
inline char* GetChars();
// Casting
- static inline SeqOneByteString* cast(Object* obj);
+ static inline SeqAsciiString* cast(Object* obj);
// Garbage collection support. This method is called by the
// garbage collector to compute the actual size of an AsciiString
// instance.
- inline int SeqOneByteStringSize(InstanceType instance_type);
+ inline int SeqAsciiStringSize(InstanceType instance_type);
// Computes the size for an AsciiString instance of a given length.
static int SizeFor(int length) {
@@ -7621,17 +7556,17 @@ class SeqOneByteString: public SeqString {
static const int kMaxLength = (kMaxSize - kHeaderSize);
// Support for StringInputBuffer.
- inline void SeqOneByteStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
+ inline void SeqAsciiStringReadBlockIntoBuffer(ReadBlockBuffer* buffer,
unsigned* offset,
unsigned chars);
- inline const unibrow::byte* SeqOneByteStringReadBlock(unsigned* remaining,
+ inline const unibrow::byte* SeqAsciiStringReadBlock(unsigned* remaining,
unsigned* offset,
unsigned chars);
- DECLARE_VERIFIER(SeqOneByteString)
+ DECLARE_VERIFIER(SeqAsciiString)
private:
- DISALLOW_IMPLICIT_CONSTRUCTORS(SeqOneByteString);
+ DISALLOW_IMPLICIT_CONSTRUCTORS(SeqAsciiString);
};
@@ -7972,75 +7907,14 @@ class StringInputBuffer: public unibrow::InputBuffer<String, String*, 1024> {
};
-// This maintains an off-stack representation of the stack frames required
-// to traverse a ConsString, allowing an entirely iterative and restartable
-// traversal of the entire string.
-// Note: this class is not GC-safe.
-class ConsStringIteratorOp {
+class SafeStringInputBuffer
+ : public unibrow::InputBuffer<String, String**, 256> {
public:
- struct ContinueResponse {
- String* string_;
- unsigned offset_;
- unsigned length_;
- int32_t type_;
- };
- inline ConsStringIteratorOp() {}
- String* Operate(ConsString* consString, unsigned* outerOffset,
- int32_t* typeOut, unsigned* lengthOut);
- inline bool ContinueOperation(ContinueResponse* response);
- inline void Reset();
- inline bool HasMore();
-
- private:
- // TODO(dcarney): Templatize this out for different stack sizes.
- static const unsigned kStackSize = 32;
- // Use a mask instead of doing modulo operations for stack wrapping.
- static const unsigned kDepthMask = kStackSize-1;
- STATIC_ASSERT(IS_POWER_OF_TWO(kStackSize));
- static inline unsigned OffsetForDepth(unsigned depth);
- static inline uint32_t MaskForDepth(unsigned depth);
-
- inline void ClearRightDescent();
- inline void SetRightDescent();
- inline void PushLeft(ConsString* string);
- inline void PushRight(ConsString* string, int32_t type);
- inline void AdjustMaximumDepth();
- inline void Pop();
- inline void ResetStack();
- String* NextLeaf(bool* blewStack, int32_t* typeOut);
-
- unsigned depth_;
- unsigned maximum_depth_;
- uint32_t trace_;
- ConsString* frames_[kStackSize];
- unsigned consumed_;
- ConsString* root_;
- int32_t root_type_;
- unsigned root_length_;
- DISALLOW_COPY_AND_ASSIGN(ConsStringIteratorOp);
-};
-
-
-// Note: this class is not GC-safe.
-class StringCharacterStream {
- public:
- inline StringCharacterStream(
- String* string, unsigned offset, ConsStringIteratorOp* op);
- inline uint16_t GetNext();
- inline bool HasMore();
- inline void Reset(String* string, unsigned offset, ConsStringIteratorOp* op);
- inline void VisitOneByteString(const uint8_t* chars, unsigned length);
- inline void VisitTwoByteString(const uint16_t* chars, unsigned length);
-
- private:
- bool is_one_byte_;
- union {
- const uint8_t* buffer8_;
- const uint16_t* buffer16_;
- };
- const uint8_t* end_;
- ConsStringIteratorOp* op_;
- DISALLOW_COPY_AND_ASSIGN(StringCharacterStream);
+ virtual void Seek(unsigned pos);
+ inline SafeStringInputBuffer()
+ : unibrow::InputBuffer<String, String**, 256>() {}
+ explicit inline SafeStringInputBuffer(String** backing)
+ : unibrow::InputBuffer<String, String**, 256>(backing) {}
};
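[Editor's note] The deleted ConsStringIteratorOp kept an explicit fixed-size stack of ConsString frames so a whole rope could be walked iteratively (with restart support when that stack overflowed). A compact standalone sketch of the same idea over a toy rope type, without the wrap-around depth mask or restart logic:

#include <cstdio>
#include <string>

// Toy rope: a leaf holds text, an inner node has two children.
struct Rope {
  const char* leaf;  // Non-null for leaves.
  const Rope* left;
  const Rope* right;
};

// Iterative left-to-right traversal with an explicit stack of
// pending right children.
std::string Flatten(const Rope* root) {
  const Rope* stack[32];
  int depth = 0;
  std::string out;
  const Rope* node = root;
  while (node != NULL) {
    if (node->leaf != NULL) {
      out += node->leaf;
      node = (depth > 0) ? stack[--depth] : NULL;
    } else {
      stack[depth++] = node->right;  // Visit after the left subtree.
      node = node->left;
    }
  }
  return out;
}

int main() {
  Rope l1 = { "con", 0, 0 }, l2 = { "s-", 0, 0 }, l3 = { "string", 0, 0 };
  Rope inner = { 0, &l1, &l2 }, root = { 0, &inner, &l3 };
  std::printf("%s\n", Flatten(&root).c_str());  // Prints: cons-string
}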
@@ -8430,7 +8304,6 @@ class JSArray: public JSObject {
// Initializes the array to a certain length.
inline bool AllowsSetElementsLength();
- // Can cause GC.
MUST_USE_RESULT MaybeObject* SetElementsLength(Object* length);
// Set the content of the array to the content of storage.
@@ -9024,10 +8897,6 @@ class ObjectVisitor BASE_EMBEDDED {
// Visits a debug call target in the instruction stream.
virtual void VisitDebugTarget(RelocInfo* rinfo);
- // Visits the byte sequence in a function's prologue that contains information
- // about the code's age.
- virtual void VisitCodeAgeSequence(RelocInfo* rinfo);
-
// Handy shorthand for visiting a single pointer.
virtual void VisitPointer(Object** p) { VisitPointers(p, p + 1); }
diff --git a/deps/v8/src/optimizing-compiler-thread.cc b/deps/v8/src/optimizing-compiler-thread.cc
index e41c352f4..06018dd1a 100644
--- a/deps/v8/src/optimizing-compiler-thread.cc
+++ b/deps/v8/src/optimizing-compiler-thread.cc
@@ -48,8 +48,6 @@ void OptimizingCompilerThread::Run() {
while (true) {
input_queue_semaphore_->Wait();
- Logger::TimerEventScope timer(
- isolate_, Logger::TimerEventScope::v8_recompile_parallel);
if (Acquire_Load(&stop_thread_)) {
stop_semaphore_->Signal();
if (FLAG_trace_parallel_recompilation) {
@@ -74,13 +72,7 @@ void OptimizingCompilerThread::Run() {
USE(status);
output_queue_.Enqueue(optimizing_compiler);
- if (!FLAG_manual_parallel_recompilation) {
- isolate_->stack_guard()->RequestCodeReadyEvent();
- } else {
- // In manual mode, do not trigger a code ready event.
- // Instead, wait for the optimized functions to be installed manually.
- output_queue_semaphore_->Signal();
- }
+ isolate_->stack_guard()->RequestCodeReadyEvent();
if (FLAG_trace_parallel_recompilation) {
time_spent_compiling_ += OS::Ticks() - compiling_start;
@@ -107,9 +99,6 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
HandleScope handle_scope(isolate_);
int functions_installed = 0;
while (!output_queue_.IsEmpty()) {
- if (FLAG_manual_parallel_recompilation) {
- output_queue_semaphore_->Wait();
- }
OptimizingCompiler* compiler = NULL;
output_queue_.Dequeue(&compiler);
Compiler::InstallOptimizedCode(compiler);
@@ -121,18 +110,6 @@ void OptimizingCompilerThread::InstallOptimizedFunctions() {
}
-Handle<SharedFunctionInfo>
- OptimizingCompilerThread::InstallNextOptimizedFunction() {
- ASSERT(FLAG_manual_parallel_recompilation);
- output_queue_semaphore_->Wait();
- OptimizingCompiler* compiler = NULL;
- output_queue_.Dequeue(&compiler);
- Handle<SharedFunctionInfo> shared = compiler->info()->shared_info();
- Compiler::InstallOptimizedCode(compiler);
- return shared;
-}
-
-
void OptimizingCompilerThread::QueueForOptimization(
OptimizingCompiler* optimizing_compiler) {
input_queue_.Enqueue(optimizing_compiler);
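
With the manual-recompilation branch removed, the thread above is a plain producer/consumer pair: the main thread enqueues work and signals a semaphore, the worker compiles off the main thread and parks results on an output queue that the main thread drains at a safe point. A reduced sketch of that shape using standard C++ primitives (a condition variable stands in for V8's semaphores; all names are illustrative):

    #include <condition_variable>
    #include <mutex>
    #include <queue>
    #include <thread>

    struct Job { int function_id; };

    class CompilerThread {
     public:
      CompilerThread() : worker_(&CompilerThread::Run, this) {}

      ~CompilerThread() {
        { std::lock_guard<std::mutex> lock(mu_); stop_ = true; }
        cv_.notify_one();
        worker_.join();
      }

      // Main-thread side: hand a job to the background thread.
      void QueueForOptimization(Job job) {
        { std::lock_guard<std::mutex> lock(mu_); input_.push(job); }
        cv_.notify_one();
      }

      // Main-thread side: install everything compiled so far.
      void InstallOptimizedFunctions() {
        std::lock_guard<std::mutex> lock(mu_);
        while (!output_.empty()) {
          // ... install output_.front() into the running code ...
          output_.pop();
        }
      }

     private:
      void Run() {
        std::unique_lock<std::mutex> lock(mu_);
        for (;;) {
          cv_.wait(lock, [this] { return stop_ || !input_.empty(); });
          if (stop_) return;
          Job job = input_.front();
          input_.pop();
          lock.unlock();
          // ... compile `job` without holding the lock ...
          lock.lock();
          output_.push(job);  // picked up by InstallOptimizedFunctions()
        }
      }

      std::mutex mu_;
      std::condition_variable cv_;
      std::queue<Job> input_;
      std::queue<Job> output_;
      bool stop_ = false;
      std::thread worker_;  // declared last: starts after the state above
    };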
diff --git a/deps/v8/src/optimizing-compiler-thread.h b/deps/v8/src/optimizing-compiler-thread.h
index 2d56d1a72..d5627266d 100644
--- a/deps/v8/src/optimizing-compiler-thread.h
+++ b/deps/v8/src/optimizing-compiler-thread.h
@@ -29,8 +29,8 @@
#define V8_OPTIMIZING_COMPILER_THREAD_H_
#include "atomicops.h"
-#include "flags.h"
#include "platform.h"
+#include "flags.h"
#include "unbound-queue.h"
namespace v8 {
@@ -38,19 +38,14 @@ namespace internal {
class HGraphBuilder;
class OptimizingCompiler;
-class SharedFunctionInfo;
class OptimizingCompilerThread : public Thread {
public:
explicit OptimizingCompilerThread(Isolate *isolate) :
Thread("OptimizingCompilerThread"),
-#ifdef DEBUG
- thread_id_(0),
-#endif
isolate_(isolate),
stop_semaphore_(OS::CreateSemaphore(0)),
input_queue_semaphore_(OS::CreateSemaphore(0)),
- output_queue_semaphore_(OS::CreateSemaphore(0)),
time_spent_compiling_(0),
time_spent_total_(0) {
NoBarrier_Store(&stop_thread_, static_cast<AtomicWord>(false));
@@ -62,9 +57,6 @@ class OptimizingCompilerThread : public Thread {
void QueueForOptimization(OptimizingCompiler* optimizing_compiler);
void InstallOptimizedFunctions();
- // Wait for the next optimized function and install it.
- Handle<SharedFunctionInfo> InstallNextOptimizedFunction();
-
inline bool IsQueueAvailable() {
// We don't need a barrier since we have a data dependency right
// after.
@@ -84,26 +76,24 @@ class OptimizingCompilerThread : public Thread {
#endif
~OptimizingCompilerThread() {
- delete output_queue_semaphore_; // Only used for manual mode.
delete input_queue_semaphore_;
delete stop_semaphore_;
}
private:
-#ifdef DEBUG
- int thread_id_;
-#endif
-
Isolate* isolate_;
Semaphore* stop_semaphore_;
Semaphore* input_queue_semaphore_;
- Semaphore* output_queue_semaphore_;
UnboundQueue<OptimizingCompiler*> input_queue_;
UnboundQueue<OptimizingCompiler*> output_queue_;
volatile AtomicWord stop_thread_;
volatile Atomic32 queue_length_;
int64_t time_spent_compiling_;
int64_t time_spent_total_;
+
+#ifdef DEBUG
+ int thread_id_;
+#endif
};
} } // namespace v8::internal
diff --git a/deps/v8/src/parser.cc b/deps/v8/src/parser.cc
index 94d2f9e6a..129bd9546 100644
--- a/deps/v8/src/parser.cc
+++ b/deps/v8/src/parser.cc
@@ -614,6 +614,11 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
ASSERT(target_stack_ == NULL);
if (pre_data_ != NULL) pre_data_->Initialize();
+ // Compute the parsing mode.
+ Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
+ if (allow_natives_syntax_ || extension_ != NULL) mode = PARSE_EAGERLY;
+ ParsingModeScope parsing_mode(this, mode);
+
Handle<String> no_name = isolate()->factory()->empty_symbol();
FunctionLiteral* result = NULL;
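
ParsingModeScope here behaves as an RAII save/restore guard: it records the parser's current mode, installs the new one, and its destructor reinstates the old value when the enclosing scope exits, which is why hoisting it ahead of the scope setup is safe. A generic sketch of that guard (ScopedValueOverride is an illustrative name):

    // RAII guard: installs a new value into a slot and restores the previous
    // value when the scope ends, including on early return.
    template <typename T>
    class ScopedValueOverride {
     public:
      ScopedValueOverride(T* slot, T new_value) : slot_(slot), saved_(*slot) {
        *slot_ = new_value;
      }
      ~ScopedValueOverride() { *slot_ = saved_; }

     private:
      T* slot_;
      T saved_;
    };

    enum Mode { PARSE_LAZILY, PARSE_EAGERLY };
    static Mode current_mode = PARSE_LAZILY;

    void ParseEagerly() {
      ScopedValueOverride<Mode> eager(&current_mode, PARSE_EAGERLY);
      // ... current_mode == PARSE_EAGERLY for the rest of this function ...
    }  // destructor runs: current_mode is PARSE_LAZILY again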
@@ -632,13 +637,6 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
scope->set_start_position(0);
scope->set_end_position(source->length());
- // Compute the parsing mode.
- Mode mode = (FLAG_lazy && allow_lazy_) ? PARSE_LAZILY : PARSE_EAGERLY;
- if (allow_natives_syntax_ || extension_ != NULL || scope->is_eval_scope()) {
- mode = PARSE_EAGERLY;
- }
- ParsingModeScope parsing_mode(this, mode);
-
FunctionState function_state(this, scope, isolate()); // Enters 'scope'.
top_scope_->SetLanguageMode(info->language_mode());
ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
@@ -1061,14 +1059,12 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
// as specified in ES5 10.4.2(3). The correct fix would be to always
// add this scope in DoParseProgram(), but that requires adaptations
// all over the code base, so we go with a quick-fix for now.
- // In the same manner, we have to patch the parsing mode.
if (is_eval && !top_scope_->is_eval_scope()) {
ASSERT(top_scope_->is_global_scope());
Scope* scope = NewScope(top_scope_, EVAL_SCOPE);
scope->set_start_position(top_scope_->start_position());
scope->set_end_position(top_scope_->end_position());
top_scope_ = scope;
- mode_ = PARSE_EAGERLY;
}
// TODO(ES6): Fix entering extended mode, once it is specified.
top_scope_->SetLanguageMode(FLAG_harmony_scoping
@@ -1164,7 +1160,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
#endif
Module* module = ParseModule(CHECK_OK);
- VariableProxy* proxy = NewUnresolved(name, MODULE, module->interface());
+ VariableProxy* proxy = NewUnresolved(name, LET, module->interface());
Declaration* declaration =
factory()->NewModuleDeclaration(proxy, module, top_scope_);
Declare(declaration, true, CHECK_OK);
@@ -1183,7 +1179,7 @@ Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
if (module->body() == NULL)
return factory()->NewEmptyStatement();
else
- return factory()->NewModuleStatement(proxy, module->body());
+ return module->body();
}
@@ -1332,15 +1328,12 @@ Module* Parser::ParseModuleUrl(bool* ok) {
if (FLAG_print_interface_details) PrintF("# Url ");
#endif
- // Create an empty literal as long as the feature isn't finished.
- USE(symbol);
- Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
- Block* body = factory()->NewBlock(NULL, 1, false);
- body->set_scope(scope);
- Interface* interface = scope->interface();
- Module* result = factory()->NewModuleLiteral(body, interface);
+ Module* result = factory()->NewModuleUrl(symbol);
+ Interface* interface = result->interface();
interface->Freeze(ok);
ASSERT(*ok);
+ // Create dummy scope to avoid errors as long as the feature isn't finished.
+ Scope* scope = NewScope(top_scope_, MODULE_SCOPE);
interface->Unify(scope->interface(), zone(), ok);
ASSERT(*ok);
return result;
@@ -1709,9 +1702,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
*ok = false;
return;
}
+ const char* type =
+ (var->mode() == VAR) ? "var" : var->is_const_mode() ? "const" : "let";
Handle<String> type_string =
- isolate()->factory()->NewStringFromUtf8(CStrVector("Variable"),
- TENURED);
+ isolate()->factory()->NewStringFromUtf8(CStrVector(type), TENURED);
Expression* expression =
NewThrowTypeError(isolate()->factory()->redeclaration_symbol(),
type_string, name);
@@ -3209,8 +3203,7 @@ Expression* Parser::ParseUnaryExpression(bool* ok) {
if (op == Token::NOT) {
// Convert the literal to a boolean condition and negate it.
bool condition = literal->ToBoolean()->IsTrue();
- Handle<Object> result(isolate()->heap()->ToBoolean(!condition),
- isolate());
+ Handle<Object> result(isolate()->heap()->ToBoolean(!condition));
return factory()->NewLiteral(result);
} else if (literal->IsNumber()) {
// Compute some expressions involving only number literals.
@@ -3718,16 +3711,17 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
int literal_index = current_function_state_->NextMaterializedLiteralIndex();
// Allocate a fixed array to hold all the object literals.
- Handle<JSArray> array =
- isolate()->factory()->NewJSArray(0, FAST_HOLEY_SMI_ELEMENTS);
- isolate()->factory()->SetElementsCapacityAndLength(
- array, values->length(), values->length());
+ Handle<FixedArray> object_literals =
+ isolate()->factory()->NewFixedArray(values->length(), TENURED);
+ Handle<FixedDoubleArray> double_literals;
+ ElementsKind elements_kind = FAST_SMI_ELEMENTS;
+ bool has_only_undefined_values = true;
+ bool has_hole_values = false;
// Fill in the literals.
Heap* heap = isolate()->heap();
bool is_simple = true;
int depth = 1;
- bool is_holey = false;
for (int i = 0, n = values->length(); i < n; i++) {
MaterializedLiteral* m_literal = values->at(i)->AsMaterializedLiteral();
if (m_literal != NULL && m_literal->depth() + 1 > depth) {
@@ -3735,33 +3729,91 @@ Expression* Parser::ParseArrayLiteral(bool* ok) {
}
Handle<Object> boilerplate_value = GetBoilerplateValue(values->at(i));
if (boilerplate_value->IsTheHole()) {
- is_holey = true;
+ has_hole_values = true;
+ object_literals->set_the_hole(i);
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ double_literals->set_the_hole(i);
+ }
} else if (boilerplate_value->IsUndefined()) {
is_simple = false;
- JSObject::SetOwnElement(
- array, i, handle(Smi::FromInt(0), isolate()), kNonStrictMode);
+ object_literals->set(i, Smi::FromInt(0));
+ if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ double_literals->set(i, 0);
+ }
} else {
- JSObject::SetOwnElement(array, i, boilerplate_value, kNonStrictMode);
+ // Examine each literal element, and adjust the ElementsKind if the
+ // literal element is not of a type that can be stored in the current
+ // ElementsKind. Start with FAST_SMI_ELEMENTS, and transition to
+ // FAST_DOUBLE_ELEMENTS and FAST_ELEMENTS as necessary. Always remember
+ // the tagged value, no matter what the ElementsKind is in case we
+ // ultimately end up in FAST_ELEMENTS.
+ has_only_undefined_values = false;
+ object_literals->set(i, *boilerplate_value);
+ if (elements_kind == FAST_SMI_ELEMENTS) {
+ // Smi only elements. Notice if a transition to FAST_DOUBLE_ELEMENTS or
+ // FAST_ELEMENTS is required.
+ if (!boilerplate_value->IsSmi()) {
+ if (boilerplate_value->IsNumber() && FLAG_smi_only_arrays) {
+ // Allocate a double array on the FAST_DOUBLE_ELEMENTS transition to
+ // avoid over-allocating in TENURED space.
+ double_literals = isolate()->factory()->NewFixedDoubleArray(
+ values->length(), TENURED);
+ // Copy the contents of the FAST_SMI_ELEMENTS array to the
+ // FAST_DOUBLE_ELEMENTS array so that they are in sync.
+ for (int j = 0; j < i; ++j) {
+ Object* smi_value = object_literals->get(j);
+ if (smi_value->IsTheHole()) {
+ double_literals->set_the_hole(j);
+ } else {
+ double_literals->set(j, Smi::cast(smi_value)->value());
+ }
+ }
+ double_literals->set(i, boilerplate_value->Number());
+ elements_kind = FAST_DOUBLE_ELEMENTS;
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
+ } else if (elements_kind == FAST_DOUBLE_ELEMENTS) {
+ // Continue to store double values in to FAST_DOUBLE_ELEMENTS arrays
+ // until the first value is seen that can't be stored as a double.
+ if (boilerplate_value->IsNumber()) {
+ double_literals->set(i, boilerplate_value->Number());
+ } else {
+ elements_kind = FAST_ELEMENTS;
+ }
+ }
}
}
- Handle<FixedArrayBase> element_values(array->elements());
+ // Very small array literals that don't have a concrete hint about their type
+ // from a constant value should default to the slow case to avoid lots of
+ // elements transitions on really small objects.
+ if (has_only_undefined_values && values->length() <= 2) {
+ elements_kind = FAST_ELEMENTS;
+ }
// Simple and shallow arrays can be lazily copied, we transform the
// elements array to a copy-on-write array.
if (is_simple && depth == 1 && values->length() > 0 &&
- array->HasFastSmiOrObjectElements()) {
- element_values->set_map(heap->fixed_cow_array_map());
+ elements_kind != FAST_DOUBLE_ELEMENTS) {
+ object_literals->set_map(heap->fixed_cow_array_map());
}
+ Handle<FixedArrayBase> element_values = elements_kind == FAST_DOUBLE_ELEMENTS
+ ? Handle<FixedArrayBase>(double_literals)
+ : Handle<FixedArrayBase>(object_literals);
+
// Remember both the literal's constant values as well as the ElementsKind
// in a 2-element FixedArray.
- Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(2, TENURED);
+ Handle<FixedArray> literals =
+ isolate()->factory()->NewFixedArray(2, TENURED);
- ElementsKind kind = array->GetElementsKind();
- kind = is_holey ? GetHoleyElementsKind(kind) : GetPackedElementsKind(kind);
+ if (has_hole_values || !FLAG_packed_arrays) {
+ elements_kind = GetHoleyElementsKind(elements_kind);
+ }
- literals->set(0, Smi::FromInt(kind));
+ literals->set(0, Smi::FromInt(elements_kind));
literals->set(1, *element_values);
return factory()->NewArrayLiteral(
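
The new literal scan above is a one-way widening lattice: a literal starts as smi-only, widens to double storage the first time a non-smi number appears (back-filling the double array from the smis seen so far), and degrades to generic tagged elements on anything else, never narrowing again. The state machine on its own (the enum values mirror the ones used above; the helper is illustrative):

    #include <vector>

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum ValueKind { kSmi, kHeapNumber, kOther };

    // One-way widening: smi-only -> double-only -> generic. The kind never
    // narrows, so a single forward pass suffices.
    ElementsKind ComputeElementsKind(const std::vector<ValueKind>& values) {
      ElementsKind kind = FAST_SMI_ELEMENTS;
      for (ValueKind v : values) {
        if (kind == FAST_SMI_ELEMENTS) {
          if (v == kHeapNumber) kind = FAST_DOUBLE_ELEMENTS;
          else if (v == kOther) kind = FAST_ELEMENTS;
        } else if (kind == FAST_DOUBLE_ELEMENTS) {
          if (v == kOther) kind = FAST_ELEMENTS;  // a double array can't hold it
        }
      }
      return kind;
    }

Holes never drive a transition by themselves; they only select the holey variant of whatever kind the scan settles on, which is what the final GetHoleyElementsKind() call above does.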
diff --git a/deps/v8/src/parser.h b/deps/v8/src/parser.h
index 0f85f9158..93fd1b8aa 100644
--- a/deps/v8/src/parser.h
+++ b/deps/v8/src/parser.h
@@ -96,6 +96,7 @@ class FunctionEntry BASE_EMBEDDED {
private:
Vector<unsigned> backing_;
+ bool owns_data_;
};
diff --git a/deps/v8/src/platform-cygwin.cc b/deps/v8/src/platform-cygwin.cc
index 8c5e5b908..089ea38d9 100644
--- a/deps/v8/src/platform-cygwin.cc
+++ b/deps/v8/src/platform-cygwin.cc
@@ -359,12 +359,6 @@ bool VirtualMemory::Guard(void* address) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -655,13 +649,24 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
OS::Sleep(interval_);
}
}
@@ -674,6 +679,11 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
diff --git a/deps/v8/src/platform-freebsd.cc b/deps/v8/src/platform-freebsd.cc
index d02d66842..511759c48 100644
--- a/deps/v8/src/platform-freebsd.cc
+++ b/deps/v8/src/platform-freebsd.cc
@@ -456,12 +456,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
pthread_t thread_; // Thread handle for pthread.
@@ -712,6 +706,11 @@ static void ProfilerSignalHandler(int signal, siginfo_t* info, void* context) {
class SignalSender : public Thread {
public:
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -762,14 +761,38 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
}
}
@@ -779,15 +802,21 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep() {
+ void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
diff --git a/deps/v8/src/platform-linux.cc b/deps/v8/src/platform-linux.cc
index 7a186413b..beb2ccee2 100644
--- a/deps/v8/src/platform-linux.cc
+++ b/deps/v8/src/platform-linux.cc
@@ -722,11 +722,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- return true;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -1091,6 +1086,11 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -1146,16 +1146,43 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
}
}
@@ -1165,6 +1192,11 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SendProfilingSignal(int tid) {
if (!signal_handler_installed_) return;
// Glibc doesn't provide a wrapper for tgkill(2).
@@ -1175,10 +1207,11 @@ class SignalSender : public Thread {
#endif
}
- void Sleep() {
+ void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
#if defined(ANDROID)
usleep(interval);
#else
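
The restructured loop above interleaves the two tick sources when both profilers are active: a CPU-profile pass, half the sampling period, a runtime-profile pass, then the other half, so neither starves the other within one period. The scheduling skeleton with the signal and registry machinery stripped away (a sketch; the two Do* bodies are placeholders):

    #include <chrono>
    #include <thread>

    static void DoCpuProfile()     { /* send SIGPROF to the VM thread */ }
    static void DoRuntimeProfile() { /* request a runtime-profiler tick */ }

    // One iteration of the sampler loop; interval is the full sampling period.
    void SampleOnce(bool cpu_profiling, bool runtime_profiling,
                    std::chrono::microseconds interval) {
      if (cpu_profiling && runtime_profiling) {
        DoCpuProfile();                            // tick 1
        std::this_thread::sleep_for(interval / 2);
        DoRuntimeProfile();                        // tick 2, half a period later
        std::this_thread::sleep_for(interval / 2);
      } else {
        if (cpu_profiling) DoCpuProfile();
        if (runtime_profiling) DoRuntimeProfile();
        std::this_thread::sleep_for(interval);
      }
    }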
diff --git a/deps/v8/src/platform-macos.cc b/deps/v8/src/platform-macos.cc
index e69833885..a216f6e4c 100644
--- a/deps/v8/src/platform-macos.cc
+++ b/deps/v8/src/platform-macos.cc
@@ -471,11 +471,6 @@ bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -787,13 +782,24 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
OS::Sleep(interval_);
}
}
@@ -806,6 +812,11 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SampleContext(Sampler* sampler) {
thread_act_t profiled_thread = sampler->platform_data()->profiled_thread();
TickSample sample_obj;
diff --git a/deps/v8/src/platform-nullos.cc b/deps/v8/src/platform-nullos.cc
index ccd21231e..7aaa7b204 100644
--- a/deps/v8/src/platform-nullos.cc
+++ b/deps/v8/src/platform-nullos.cc
@@ -340,12 +340,6 @@ bool VirtualMemory::Guard(void* address) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() {
diff --git a/deps/v8/src/platform-openbsd.cc b/deps/v8/src/platform-openbsd.cc
index d4ab9a66e..408d4dc0f 100644
--- a/deps/v8/src/platform-openbsd.cc
+++ b/deps/v8/src/platform-openbsd.cc
@@ -504,12 +504,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) {}
@@ -784,6 +778,11 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -839,16 +838,43 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
}
}
@@ -858,15 +884,21 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep() {
+ void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
diff --git a/deps/v8/src/platform-posix.cc b/deps/v8/src/platform-posix.cc
index e652a083c..3bc83733c 100644
--- a/deps/v8/src/platform-posix.cc
+++ b/deps/v8/src/platform-posix.cc
@@ -109,20 +109,11 @@ void* OS::GetRandomMmapAddr() {
raw_addr &= V8_UINT64_C(0x3ffffffff000);
#else
uint32_t raw_addr = V8::RandomPrivate(isolate);
-
- // For our 32-bit mmap() hint, we pick a random address in the bottom
- // half of the top half of the address space (that is, the third quarter).
- // Because we do not MAP_FIXED, this will be treated only as a hint -- the
- // system will not fail to mmap() because something else happens to already
- // be mapped at our random address. We deliberately set the hint high enough
- // to get well above the system's break (that is, the heap); systems will
- // either try the hint and if that fails move higher (MacOS and other BSD
- // derivatives) or try the hint and if that fails allocate as if there were
- // no hint at all (Linux, Solaris, illumos and derivatives). The high hint
- // prevents the break from getting hemmed in at low values, ceding half of
- // the address space to the system heap.
+ // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+ // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
+ // 10.6 and 10.7.
raw_addr &= 0x3ffff000;
- raw_addr += 0x80000000;
+ raw_addr += 0x20000000;
#endif
return reinterpret_cast<void*>(raw_addr);
}
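
For the 32-bit case the arithmetic is: mask the random word down to a 4 KB-aligned value below 1 GB, then rebase it at 0x20000000, so every hint lands in [0x20000000, 0x60000000). The same computation in isolation (the Mersenne Twister is a stand-in for V8's private RNG):

    #include <cstdint>
    #include <cstdio>
    #include <random>

    // Page-aligned mmap() hint in [0x20000000, 0x60000000).
    void* RandomMmapHint(std::mt19937* rng) {
      uint32_t raw = (*rng)();
      raw &= 0x3ffff000u;  // keep bits 12..29: below 1 GB, 4 KB aligned
      raw += 0x20000000u;  // rebase into the sparsely populated range
      return reinterpret_cast<void*>(static_cast<uintptr_t>(raw));
    }

    int main() {
      std::mt19937 rng(12345);  // deterministic seed, for illustration only
      std::printf("%p\n", RandomMmapHint(&rng));
    }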
@@ -151,19 +142,11 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
#undef MATH_FUNCTION
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
double OS::nan_value() {
// NAN from math.h is defined in C99 and not in POSIX.
return NAN;
@@ -349,7 +332,6 @@ void POSIXPostSetUp() {
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
- // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
diff --git a/deps/v8/src/platform-solaris.cc b/deps/v8/src/platform-solaris.cc
index 70f86596e..4248ea214 100644
--- a/deps/v8/src/platform-solaris.cc
+++ b/deps/v8/src/platform-solaris.cc
@@ -125,8 +125,12 @@ const char* OS::LocalTimezone(double time) {
double OS::LocalTimeOffset() {
- tzset();
- return -static_cast<double>(timezone * msPerSecond);
+ // On Solaris, struct tm does not contain a tm_gmtoff field.
+ time_t utc = time(NULL);
+ ASSERT(utc != -1);
+ struct tm* loc = localtime(&utc);
+ ASSERT(loc != NULL);
+ return static_cast<double>((mktime(loc) - utc) * msPerSecond);
}
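
The underlying recipe when struct tm has no tm_gmtoff is to round-trip through the C time API: mktime() interprets a broken-down time as local time, so packing the UTC fields with it yields the epoch value shifted by exactly the zone offset (round-tripping localtime() output, by contrast, reconstructs the input). A self-contained sketch of the gmtime() form, with deliberately naive DST handling:

    #include <cassert>
    #include <ctime>

    // Milliseconds east of UTC for the current local zone.
    static double LocalTimeOffsetMs() {
      time_t now = time(NULL);
      assert(now != static_cast<time_t>(-1));
      struct tm* utc = gmtime(&now);
      assert(utc != NULL);
      struct tm fields = *utc;           // UTC broken-down time
      fields.tm_isdst = -1;              // let mktime() infer DST
      time_t shifted = mktime(&fields);  // treats fields as local: now - offset
      return static_cast<double>(now - shifted) * 1000.0;
    }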
@@ -444,12 +448,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
class Thread::PlatformData : public Malloced {
public:
PlatformData() : thread_(kNoThread) { }
@@ -701,6 +699,11 @@ class Sampler::PlatformData : public Malloced {
class SignalSender : public Thread {
public:
+ enum SleepInterval {
+ HALF_INTERVAL,
+ FULL_INTERVAL
+ };
+
static const int kSignalSenderStackSize = 64 * KB;
explicit SignalSender(int interval)
@@ -755,16 +758,44 @@ class SignalSender : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
+ if (cpu_profiling_enabled && !signal_handler_installed_) {
+ InstallSignalHandler();
+ } else if (!cpu_profiling_enabled && signal_handler_installed_) {
+ RestoreSignalHandler();
+ }
+
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- if (!signal_handler_installed_) InstallSignalHandler();
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
- if (signal_handler_installed_) RestoreSignalHandler();
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
- Sleep(); // TODO(svenpanne) Figure out if OS:Sleep(interval_) is enough.
+ if (cpu_profiling_enabled && runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ Sleep(HALF_INTERVAL);
+ } else {
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile,
+ this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile,
+ NULL)) {
+ return;
+ }
+ }
+ Sleep(FULL_INTERVAL);
+ }
}
}
@@ -774,15 +805,21 @@ class SignalSender : public Thread {
sender->SendProfilingSignal(sampler->platform_data()->vm_tid());
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SendProfilingSignal(pthread_t tid) {
if (!signal_handler_installed_) return;
pthread_kill(tid, SIGPROF);
}
- void Sleep() {
+ void Sleep(SleepInterval full_or_half) {
// Convert ms to us and subtract 100 us to compensate for delays
// occurring during signal delivery.
useconds_t interval = interval_ * 1000 - 100;
+ if (full_or_half == HALF_INTERVAL) interval /= 2;
int result = usleep(interval);
#ifdef DEBUG
if (result != 0 && errno != EINTR) {
diff --git a/deps/v8/src/platform-win32.cc b/deps/v8/src/platform-win32.cc
index f00d6b01b..49463be8e 100644
--- a/deps/v8/src/platform-win32.cc
+++ b/deps/v8/src/platform-win32.cc
@@ -199,19 +199,11 @@ UNARY_MATH_FUNCTION(sin, CreateTranscendentalFunction(TranscendentalCache::SIN))
UNARY_MATH_FUNCTION(cos, CreateTranscendentalFunction(TranscendentalCache::COS))
UNARY_MATH_FUNCTION(tan, CreateTranscendentalFunction(TranscendentalCache::TAN))
UNARY_MATH_FUNCTION(log, CreateTranscendentalFunction(TranscendentalCache::LOG))
-UNARY_MATH_FUNCTION(exp, CreateExpFunction())
UNARY_MATH_FUNCTION(sqrt, CreateSqrtFunction())
#undef MATH_FUNCTION
-void lazily_initialize_fast_exp() {
- if (fast_exp_function == NULL) {
- init_fast_exp_function();
- }
-}
-
-
void MathSetup() {
#ifdef _WIN64
init_modulo_function();
@@ -220,7 +212,6 @@ void MathSetup() {
init_fast_cos_function();
init_fast_tan_function();
init_fast_log_function();
- // fast_exp is initialized lazily.
init_fast_sqrt_function();
}
@@ -1560,12 +1551,6 @@ bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
}
-bool VirtualMemory::HasLazyCommits() {
- // TODO(alph): implement for the platform.
- return false;
-}
-
-
// ----------------------------------------------------------------------------
// Win32 thread support.
@@ -2010,13 +1995,24 @@ class SamplerThread : public Thread {
SamplerRegistry::State state;
while ((state = SamplerRegistry::GetState()) !=
SamplerRegistry::HAS_NO_SAMPLERS) {
+ bool cpu_profiling_enabled =
+ (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS);
+ bool runtime_profiler_enabled = RuntimeProfiler::IsEnabled();
// When CPU profiling is enabled both JavaScript and C++ code is
// profiled. We must not suspend.
- if (state == SamplerRegistry::HAS_CPU_PROFILING_SAMPLERS) {
- SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this);
- } else {
+ if (!cpu_profiling_enabled) {
if (rate_limiter_.SuspendIfNecessary()) continue;
}
+ if (cpu_profiling_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoCpuProfile, this)) {
+ return;
+ }
+ }
+ if (runtime_profiler_enabled) {
+ if (!SamplerRegistry::IterateActiveSamplers(&DoRuntimeProfile, NULL)) {
+ return;
+ }
+ }
OS::Sleep(interval_);
}
}
@@ -2029,6 +2025,11 @@ class SamplerThread : public Thread {
sampler_thread->SampleContext(sampler);
}
+ static void DoRuntimeProfile(Sampler* sampler, void* ignored) {
+ if (!sampler->isolate()->IsInitialized()) return;
+ sampler->isolate()->runtime_profiler()->NotifyTick();
+ }
+
void SampleContext(Sampler* sampler) {
HANDLE profiled_thread = sampler->platform_data()->profiled_thread();
if (profiled_thread == NULL) return;
diff --git a/deps/v8/src/platform.h b/deps/v8/src/platform.h
index 6f75ca83d..de896acad 100644
--- a/deps/v8/src/platform.h
+++ b/deps/v8/src/platform.h
@@ -119,16 +119,12 @@ class Mutex;
double ceiling(double x);
double modulo(double x, double y);
-// Custom implementation of math functions.
+// Custom implementation of sin, cos, tan and log.
double fast_sin(double input);
double fast_cos(double input);
double fast_tan(double input);
double fast_log(double input);
-double fast_exp(double input);
double fast_sqrt(double input);
-// The custom exp implementation needs 16KB of lookup data; initialize it
-// on demand.
-void lazily_initialize_fast_exp();
// Forward declarations.
class Socket;
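
The deleted lazily_initialize_fast_exp() existed because the generated exp() needs a 16 KB lookup table, so the cost was deferred until the first call. The initialize-on-first-use pattern in miniature (single-threaded, as the original assumed; SlowExp stands in for the generated code):

    #include <cmath>
    #include <cstdio>

    typedef double (*UnaryMathFunction)(double);

    static double SlowExp(double x) { return std::exp(x); }

    // Stand-in for CreateExpFunction(): pretend this builds a 16 KB table
    // and emits specialized code, which is why we defer it.
    static UnaryMathFunction CreateExpFunction() { return &SlowExp; }

    static UnaryMathFunction fast_exp_function = nullptr;

    static double FastExp(double x) {
      if (fast_exp_function == nullptr) {  // first call pays the setup cost
        fast_exp_function = CreateExpFunction();
      }
      return fast_exp_function(x);
    }

    int main() { std::printf("%f\n", FastExp(1.0)); }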
@@ -436,11 +432,6 @@ class VirtualMemory {
// and the same size it was reserved with.
static bool ReleaseRegion(void* base, size_t size);
- // Returns true if OS performs lazy commits, i.e. the memory allocation call
- // defers actual physical memory allocation till the first memory access.
- // Otherwise returns false.
- static bool HasLazyCommits();
-
private:
void* address_; // Start address of the virtual memory.
size_t size_; // Size of the virtual memory.
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index ad52d74bb..13261f7a5 100644
--- a/deps/v8/src/preparser.h
+++ b/deps/v8/src/preparser.h
@@ -150,11 +150,11 @@ class PreParser {
// Parses a single function literal, from the opening parentheses before
// parameters to the closing brace after the body.
- // Returns a FunctionEntry describing the body of the function in enough
+ // Returns a FunctionEntry describing the body of the function in enough
// detail that it can be lazily compiled.
// The scanner is expected to have matched the "function" keyword and
// parameters, and have consumed the initial '{'.
- // At return, unless an error occurred, the scanner is positioned before the
+ // At return, unless an error occurred, the scanner is positioned before
// the final '}'.
PreParseResult PreParseLazyFunction(i::LanguageMode mode,
i::ParserRecorder* log);
diff --git a/deps/v8/src/prettyprinter.cc b/deps/v8/src/prettyprinter.cc
index 602fbb40b..0d8dadce1 100644
--- a/deps/v8/src/prettyprinter.cc
+++ b/deps/v8/src/prettyprinter.cc
@@ -122,14 +122,6 @@ void PrettyPrinter::VisitModuleUrl(ModuleUrl* node) {
}
-void PrettyPrinter::VisitModuleStatement(ModuleStatement* node) {
- Print("module ");
- PrintLiteral(node->proxy()->name(), false);
- Print(" ");
- Visit(node->body());
-}
-
-
void PrettyPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
Print(";");
@@ -830,13 +822,6 @@ void AstPrinter::VisitModuleUrl(ModuleUrl* node) {
}
-void AstPrinter::VisitModuleStatement(ModuleStatement* node) {
- IndentedScope indent(this, "MODULE");
- PrintLiteralIndented("NAME", node->proxy()->name(), true);
- PrintStatements(node->body()->statements());
-}
-
-
void AstPrinter::VisitExpressionStatement(ExpressionStatement* node) {
Visit(node->expression());
}
diff --git a/deps/v8/src/profile-generator-inl.h b/deps/v8/src/profile-generator-inl.h
index e4f32a7c7..02e146f14 100644
--- a/deps/v8/src/profile-generator-inl.h
+++ b/deps/v8/src/profile-generator-inl.h
@@ -84,7 +84,7 @@ CodeEntry* ProfileGenerator::EntryForVMState(StateTag tag) {
return gc_entry_;
case JS:
case COMPILER:
- case PARALLEL_COMPILER:
+ case PARALLEL_COMPILER_PROLOGUE:
// DOM events handlers are reported as OTHER / EXTERNAL entries.
// To avoid confusing people, let's put all these entries into
// one bucket.
diff --git a/deps/v8/src/profile-generator.cc b/deps/v8/src/profile-generator.cc
index 6e49c7bd9..b853f33cb 100644
--- a/deps/v8/src/profile-generator.cc
+++ b/deps/v8/src/profile-generator.cc
@@ -1644,14 +1644,12 @@ HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
V8HeapExplorer::V8HeapExplorer(
HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress,
- v8::HeapProfiler::ObjectNameResolver* resolver)
+ SnapshottingProgressReportingInterface* progress)
: heap_(Isolate::Current()->heap()),
snapshot_(snapshot),
collection_(snapshot_->collection()),
progress_(progress),
- filler_(NULL),
- global_object_name_resolver_(resolver) {
+ filler_(NULL) {
}
@@ -1776,14 +1774,7 @@ void V8HeapExplorer::AddRootEntries(SnapshotFillerInterface* filler) {
const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
switch (object->map()->instance_type()) {
- case MAP_TYPE:
- switch (Map::cast(object)->instance_type()) {
-#define MAKE_STRING_MAP_CASE(instance_type, size, name, Name) \
- case instance_type: return "system / Map (" #Name ")";
- STRING_TYPE_LIST(MAKE_STRING_MAP_CASE)
-#undef MAKE_STRING_MAP_CASE
- default: return "system / Map";
- }
+ case MAP_TYPE: return "system / Map";
case JS_GLOBAL_PROPERTY_CELL_TYPE: return "system / JSGlobalPropertyCell";
case FOREIGN_TYPE: return "system / Foreign";
case ODDBALL_TYPE: return "system / Oddball";
@@ -1860,6 +1851,7 @@ void V8HeapExplorer::ExtractReferences(HeapObject* obj) {
ExtractJSObjectReferences(entry, JSObject::cast(obj));
} else if (obj->IsString()) {
ExtractStringReferences(entry, String::cast(obj));
+ extract_indexed_refs = false;
} else if (obj->IsContext()) {
ExtractContextReferences(entry, Context::cast(obj));
} else if (obj->IsMap()) {
@@ -1974,14 +1966,11 @@ void V8HeapExplorer::ExtractJSObjectReferences(
void V8HeapExplorer::ExtractStringReferences(int entry, String* string) {
if (string->IsConsString()) {
ConsString* cs = ConsString::cast(string);
- SetInternalReference(cs, entry, "first", cs->first(),
- ConsString::kFirstOffset);
- SetInternalReference(cs, entry, "second", cs->second(),
- ConsString::kSecondOffset);
+ SetInternalReference(cs, entry, "first", cs->first());
+ SetInternalReference(cs, entry, "second", cs->second());
} else if (string->IsSlicedString()) {
SlicedString* ss = SlicedString::cast(string);
- SetInternalReference(ss, entry, "parent", ss->parent(),
- SlicedString::kParentOffset);
+ SetInternalReference(ss, entry, "parent", ss->parent());
}
}
@@ -1999,7 +1988,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
"(context func. result caches)");
TagObject(context->normalized_map_cache(), "(context norm. map cache)");
TagObject(context->runtime_context(), "(runtime context)");
- TagObject(context->embedder_data(), "(context data)");
+ TagObject(context->data(), "(context data)");
NATIVE_CONTEXT_FIELDS(EXTRACT_CONTEXT_FIELD);
#undef EXTRACT_CONTEXT_FIELD
for (int i = Context::FIRST_WEAK_SLOT;
@@ -2141,11 +2130,9 @@ void V8HeapExplorer::ExtractCodeReferences(int entry, Code* code) {
SetInternalReference(code, entry,
"deoptimization_data", code->deoptimization_data(),
Code::kDeoptimizationDataOffset);
- if (code->kind() == Code::FUNCTION) {
- SetInternalReference(code, entry,
- "type_feedback_info", code->type_feedback_info(),
- Code::kTypeFeedbackInfoOffset);
- }
+ SetInternalReference(code, entry,
+ "type_feedback_info", code->type_feedback_info(),
+ Code::kTypeFeedbackInfoOffset);
SetInternalReference(code, entry,
"gc_metadata", code->gc_metadata(),
Code::kGCMetadataOffset);
@@ -2456,17 +2443,19 @@ bool V8HeapExplorer::IterateAndExtractReferences(
bool V8HeapExplorer::IsEssentialObject(Object* object) {
+ // We have to use raw_unchecked_* versions because checked versions
+ // would fail during iteration over object properties.
return object->IsHeapObject()
&& !object->IsOddball()
- && object != heap_->empty_byte_array()
- && object != heap_->empty_fixed_array()
- && object != heap_->empty_descriptor_array()
- && object != heap_->fixed_array_map()
- && object != heap_->global_property_cell_map()
- && object != heap_->shared_function_info_map()
- && object != heap_->free_space_map()
- && object != heap_->one_pointer_filler_map()
- && object != heap_->two_pointer_filler_map();
+ && object != heap_->raw_unchecked_empty_byte_array()
+ && object != heap_->raw_unchecked_empty_fixed_array()
+ && object != heap_->raw_unchecked_empty_descriptor_array()
+ && object != heap_->raw_unchecked_fixed_array_map()
+ && object != heap_->raw_unchecked_global_property_cell_map()
+ && object != heap_->raw_unchecked_shared_function_info_map()
+ && object != heap_->raw_unchecked_free_space_map()
+ && object != heap_->raw_unchecked_one_pointer_filler_map()
+ && object != heap_->raw_unchecked_two_pointer_filler_map();
}
@@ -2714,30 +2703,21 @@ void V8HeapExplorer::TagGlobalObjects() {
isolate->factory()->NewStringFromAscii(CStrVector("URL"));
const char** urls = NewArray<const char*>(enumerator.count());
for (int i = 0, l = enumerator.count(); i < l; ++i) {
- if (global_object_name_resolver_) {
- HandleScope scope;
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- urls[i] = global_object_name_resolver_->GetName(
- Utils::ToLocal(Handle<JSObject>::cast(global_obj)));
- } else {
- // TODO(yurys): This branch is going to be removed once Chromium migrates
- // to the new name resolver.
- urls[i] = NULL;
- HandleScope scope;
- Handle<JSGlobalObject> global_obj = enumerator.at(i);
- Object* obj_document;
- if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
- obj_document->IsJSObject()) {
- // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
- // As result GetProperty(*url_string) will crash.
- if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
- continue;
- JSObject* document = JSObject::cast(obj_document);
- Object* obj_url;
- if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
- obj_url->IsString()) {
- urls[i] = collection_->names()->GetName(String::cast(obj_url));
- }
+ urls[i] = NULL;
+ HandleScope scope;
+ Handle<JSGlobalObject> global_obj = enumerator.at(i);
+ Object* obj_document;
+ if (global_obj->GetProperty(*document_string)->ToObject(&obj_document) &&
+ obj_document->IsJSObject()) {
+ // FixMe: Workaround: SharedWorker's current Isolate has NULL context.
+ // As a result, GetProperty(*url_string) will crash.
+ if (!Isolate::Current()->context() && obj_document->IsJSGlobalProxy())
+ continue;
+ JSObject* document = JSObject::cast(obj_document);
+ Object* obj_url;
+ if (document->GetProperty(*url_string)->ToObject(&obj_url) &&
+ obj_url->IsString()) {
+ urls[i] = collection_->names()->GetName(String::cast(obj_url));
}
}
}
@@ -3092,13 +3072,11 @@ class SnapshotFiller : public SnapshotFillerInterface {
};
-HeapSnapshotGenerator::HeapSnapshotGenerator(
- HeapSnapshot* snapshot,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver)
+HeapSnapshotGenerator::HeapSnapshotGenerator(HeapSnapshot* snapshot,
+ v8::ActivityControl* control)
: snapshot_(snapshot),
control_(control),
- v8_heap_explorer_(snapshot_, this, resolver),
+ v8_heap_explorer_(snapshot_, this),
dom_explorer_(snapshot_, this) {
}
diff --git a/deps/v8/src/profile-generator.h b/deps/v8/src/profile-generator.h
index f306659ab..04f4a1c71 100644
--- a/deps/v8/src/profile-generator.h
+++ b/deps/v8/src/profile-generator.h
@@ -851,8 +851,7 @@ class SnapshottingProgressReportingInterface {
class V8HeapExplorer : public HeapEntriesAllocator {
public:
V8HeapExplorer(HeapSnapshot* snapshot,
- SnapshottingProgressReportingInterface* progress,
- v8::HeapProfiler::ObjectNameResolver* resolver);
+ SnapshottingProgressReportingInterface* progress);
virtual ~V8HeapExplorer();
virtual HeapEntry* AllocateEntry(HeapThing ptr);
void AddRootEntries(SnapshotFillerInterface* filler);
@@ -946,7 +945,6 @@ class V8HeapExplorer : public HeapEntriesAllocator {
SnapshotFillerInterface* filler_;
HeapObjectsSet objects_tags_;
HeapObjectsSet strong_gc_subroot_names_;
- v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
static HeapObject* const kGcRootsObject;
static HeapObject* const kFirstGcSubrootObject;
@@ -1023,8 +1021,7 @@ class NativeObjectsExplorer {
class HeapSnapshotGenerator : public SnapshottingProgressReportingInterface {
public:
HeapSnapshotGenerator(HeapSnapshot* snapshot,
- v8::ActivityControl* control,
- v8::HeapProfiler::ObjectNameResolver* resolver);
+ v8::ActivityControl* control);
bool GenerateSnapshot();
private:
diff --git a/deps/v8/src/property-details.h b/deps/v8/src/property-details.h
index 510e9852a..64e320514 100644
--- a/deps/v8/src/property-details.h
+++ b/deps/v8/src/property-details.h
@@ -38,10 +38,6 @@ enum PropertyAttributes {
READ_ONLY = v8::ReadOnly,
DONT_ENUM = v8::DontEnum,
DONT_DELETE = v8::DontDelete,
-
- SEALED = DONT_ENUM | DONT_DELETE,
- FROZEN = SEALED | READ_ONLY,
-
ABSENT = 16 // Used in runtime to indicate a property is absent.
// ABSENT can never be stored in or returned from a descriptor's attributes
// bitfield. It is only used as a return value meaning the attributes of
diff --git a/deps/v8/src/property.cc b/deps/v8/src/property.cc
index cbf2fc859..05342eea9 100644
--- a/deps/v8/src/property.cc
+++ b/deps/v8/src/property.cc
@@ -63,7 +63,7 @@ void LookupResult::Print(FILE* out) {
break;
case FIELD:
PrintF(out, " -type = field\n");
- PrintF(out, " -index = %d", GetFieldIndex().field_index());
+ PrintF(out, " -index = %d", GetFieldIndex());
PrintF(out, "\n");
break;
case CALLBACKS:
diff --git a/deps/v8/src/property.h b/deps/v8/src/property.h
index c41c6dc81..9eb4194b4 100644
--- a/deps/v8/src/property.h
+++ b/deps/v8/src/property.h
@@ -132,44 +132,6 @@ class CallbacksDescriptor: public Descriptor {
};
-// Holds a property index value distinguishing if it is a field index or an
-// index inside the object header.
-class PropertyIndex {
- public:
- static PropertyIndex NewFieldIndex(int index) {
- return PropertyIndex(index, false);
- }
- static PropertyIndex NewHeaderIndex(int index) {
- return PropertyIndex(index, true);
- }
-
- bool is_field_index() { return (index_ & kHeaderIndexBit) == 0; }
- bool is_header_index() { return (index_ & kHeaderIndexBit) != 0; }
-
- int field_index() {
- ASSERT(is_field_index());
- return value();
- }
- int header_index() {
- ASSERT(is_header_index());
- return value();
- }
-
- private:
- static const int kHeaderIndexBit = 1 << 31;
- static const int kIndexMask = ~kHeaderIndexBit;
-
- int value() { return index_ & kIndexMask; }
-
- PropertyIndex(int index, bool is_header_based)
- : index_(index | (is_header_based ? kHeaderIndexBit : 0)) {
- ASSERT(index <= kIndexMask);
- }
-
- int index_;
-};
-
-
class LookupResult BASE_EMBEDDED {
public:
explicit LookupResult(Isolate* isolate)
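
The PropertyIndex class deleted above packs one boolean into the top bit of the stored int: header indices set bit 31, field indices leave it clear, and value() masks the bit off again. The encoding in isolation, using an unsigned type to keep the bit twiddling well defined (names illustrative):

    #include <cassert>
    #include <cstdint>

    // A non-negative index tagged with one flag bit in the MSB, so a single
    // 32-bit word records both the index and which kind of index it is.
    class TaggedIndex {
     public:
      static TaggedIndex FieldIndex(uint32_t i)  { return TaggedIndex(i, false); }
      static TaggedIndex HeaderIndex(uint32_t i) { return TaggedIndex(i, true); }

      bool is_header_index() const { return (bits_ & kHeaderBit) != 0; }
      bool is_field_index() const { return !is_header_index(); }
      uint32_t value() const { return bits_ & kIndexMask; }

     private:
      static const uint32_t kHeaderBit = 1u << 31;
      static const uint32_t kIndexMask = ~kHeaderBit;

      TaggedIndex(uint32_t i, bool is_header)
          : bits_(i | (is_header ? kHeaderBit : 0u)) {
        assert(i <= kIndexMask);  // index must fit below the flag bit
      }

      uint32_t bits_;
    };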
@@ -316,7 +278,7 @@ class LookupResult BASE_EMBEDDED {
Object* GetLazyValue() {
switch (type()) {
case FIELD:
- return holder()->FastPropertyAt(GetFieldIndex().field_index());
+ return holder()->FastPropertyAt(GetFieldIndex());
case NORMAL: {
Object* value;
value = holder()->property_dictionary()->ValueAt(GetDictionaryEntry());
@@ -328,7 +290,7 @@ class LookupResult BASE_EMBEDDED {
case CONSTANT_FUNCTION:
return GetConstantFunction();
default:
- return Isolate::Current()->heap()->the_hole_value();
+ return Smi::FromInt(0);
}
}
@@ -372,11 +334,10 @@ class LookupResult BASE_EMBEDDED {
return number_;
}
- PropertyIndex GetFieldIndex() {
+ int GetFieldIndex() {
ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
ASSERT(IsField());
- return PropertyIndex::NewFieldIndex(
- Descriptor::IndexFromValue(GetValue()));
+ return Descriptor::IndexFromValue(GetValue());
}
int GetLocalFieldIndexFromMap(Map* map) {
diff --git a/deps/v8/src/proxy.js b/deps/v8/src/proxy.js
index 53a357247..4e86c8892 100644
--- a/deps/v8/src/proxy.js
+++ b/deps/v8/src/proxy.js
@@ -31,7 +31,7 @@ global.Proxy = new $Object();
var $Proxy = global.Proxy
-function ProxyCreate(handler, proto) {
+$Proxy.create = function(handler, proto) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (IS_UNDEFINED(proto))
@@ -41,7 +41,7 @@ function ProxyCreate(handler, proto) {
return %CreateJSProxy(handler, proto)
}
-function ProxyCreateFunction(handler, callTrap, constructTrap) {
+$Proxy.createFunction = function(handler, callTrap, constructTrap) {
if (!IS_SPEC_OBJECT(handler))
throw MakeTypeError("handler_non_object", ["create"])
if (!IS_SPEC_FUNCTION(callTrap))
@@ -62,11 +62,6 @@ function ProxyCreateFunction(handler, callTrap, constructTrap) {
handler, callTrap, constructTrap, $Function.prototype)
}
-%CheckIsBootstrapping()
-InstallFunctions($Proxy, DONT_ENUM, [
- "create", ProxyCreate,
- "createFunction", ProxyCreateFunction
-])
////////////////////////////////////////////////////////////////////////////////
diff --git a/deps/v8/src/regexp-macro-assembler.cc b/deps/v8/src/regexp-macro-assembler.cc
index ee9347acb..82ba34d5c 100644
--- a/deps/v8/src/regexp-macro-assembler.cc
+++ b/deps/v8/src/regexp-macro-assembler.cc
@@ -77,14 +77,14 @@ const byte* NativeRegExpMacroAssembler::StringCharacterPosition(
ASSERT(subject->IsExternalString() || subject->IsSeqString());
ASSERT(start_index >= 0);
ASSERT(start_index <= subject->length());
- if (subject->IsOneByteRepresentation()) {
+ if (subject->IsAsciiRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
const char* data = ExternalAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
} else {
- ASSERT(subject->IsSeqOneByteString());
- char* data = SeqOneByteString::cast(subject)->GetChars();
+ ASSERT(subject->IsSeqAsciiString());
+ char* data = SeqAsciiString::cast(subject)->GetChars();
address = reinterpret_cast<const byte*>(data);
}
return address + start_index;
@@ -133,7 +133,7 @@ NativeRegExpMacroAssembler::Result NativeRegExpMacroAssembler::Match(
slice_offset = slice->offset();
}
// Ensure that an underlying string has the same ASCII-ness.
- bool is_ascii = subject_ptr->IsOneByteRepresentation();
+ bool is_ascii = subject_ptr->IsAsciiRepresentation();
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_ascii ? 0 : 1;
diff --git a/deps/v8/src/regexp-stack.cc b/deps/v8/src/regexp-stack.cc
index 325a1496c..ff9547f3a 100644
--- a/deps/v8/src/regexp-stack.cc
+++ b/deps/v8/src/regexp-stack.cc
@@ -51,7 +51,6 @@ RegExpStack::RegExpStack()
RegExpStack::~RegExpStack() {
- thread_local_.Free();
}
diff --git a/deps/v8/src/regexp.js b/deps/v8/src/regexp.js
index da1883f3a..a3675f033 100644
--- a/deps/v8/src/regexp.js
+++ b/deps/v8/src/regexp.js
@@ -161,7 +161,6 @@ function RegExpExecNoTests(regexp, string, start) {
lastMatchInfoOverride = null;
return BuildResultFromMatchInfo(matchInfo, string);
}
- regexp.lastIndex = 0;
return null;
}
@@ -194,7 +193,7 @@ function RegExpExec(string) {
var matchIndices = %_RegExpExec(this, string, i, lastMatchInfo);
if (matchIndices === null) {
- this.lastIndex = 0;
+ if (global) this.lastIndex = 0;
return null;
}
@@ -257,10 +256,7 @@ function RegExpTest(string) {
%_Log('regexp', 'regexp-exec,%0r,%1S,%2i', [regexp, string, lastIndex]);
// matchIndices is either null or the lastMatchInfo array.
var matchIndices = %_RegExpExec(regexp, string, 0, lastMatchInfo);
- if (matchIndices === null) {
- this.lastIndex = 0;
- return false;
- }
+ if (matchIndices === null) return false;
lastMatchInfoOverride = null;
return true;
}
diff --git a/deps/v8/src/rewriter.cc b/deps/v8/src/rewriter.cc
index 2a9878717..6541546cb 100644
--- a/deps/v8/src/rewriter.cc
+++ b/deps/v8/src/rewriter.cc
@@ -109,13 +109,6 @@ void Processor::VisitBlock(Block* node) {
}
-void Processor::VisitModuleStatement(ModuleStatement* node) {
- bool set_after_body = is_set_;
- Visit(node->body());
- is_set_ = is_set_ && set_after_body;
-}
-
-
void Processor::VisitExpressionStatement(ExpressionStatement* node) {
// Rewrite : <x>; -> .result = <x>;
if (!is_set_ && !node->expression()->IsThrow()) {
@@ -264,7 +257,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
// coincides with the end of the with scope which is the position of '1'.
int position = function->end_position();
VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
- result->name(), false, result->interface(), position);
+ result->name(), false, Interface::NewValue(), position);
result_proxy->BindTo(result);
Statement* result_statement =
processor.factory()->NewReturnStatement(result_proxy);
diff --git a/deps/v8/src/runtime-profiler.cc b/deps/v8/src/runtime-profiler.cc
index 262cd1d58..23f41fa7d 100644
--- a/deps/v8/src/runtime-profiler.cc
+++ b/deps/v8/src/runtime-profiler.cc
@@ -140,9 +140,6 @@ static void GetICCounts(JSFunction* function,
void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
ASSERT(function->IsOptimizable());
- // If we are in manual mode, don't auto-optimize anything.
- if (FLAG_manual_parallel_recompilation) return;
-
if (FLAG_trace_opt) {
PrintF("[marking ");
function->PrintName();
@@ -196,9 +193,16 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
// Get the stack check stub code object to match against. We aren't
// prepared to generate it, but we don't expect to have to.
+ bool found_code = false;
Code* stack_check_code = NULL;
- InterruptStub interrupt_stub;
- bool found_code = interrupt_stub.FindCodeInCache(&stack_check_code, isolate_);
+ if (FLAG_count_based_interrupts) {
+ InterruptStub interrupt_stub;
+ found_code = interrupt_stub.FindCodeInCache(&stack_check_code);
+ } else // NOLINT
+ { // NOLINT
+ StackCheckStub check_stub;
+ found_code = check_stub.FindCodeInCache(&stack_check_code);
+ }
if (found_code) {
Code* replacement_code =
isolate_->builtins()->builtin(Builtins::kOnStackReplacement);
@@ -372,6 +376,12 @@ void RuntimeProfiler::OptimizeNow() {
}
+void RuntimeProfiler::NotifyTick() {
+ if (FLAG_count_based_interrupts) return;
+ isolate_->stack_guard()->RequestRuntimeProfilerTick();
+}
+
+
void RuntimeProfiler::SetUp() {
ASSERT(has_been_globally_set_up_);
if (!FLAG_watch_ic_patching) {
diff --git a/deps/v8/src/runtime-profiler.h b/deps/v8/src/runtime-profiler.h
index 507535f0b..ab6cb378e 100644
--- a/deps/v8/src/runtime-profiler.h
+++ b/deps/v8/src/runtime-profiler.h
@@ -52,6 +52,8 @@ class RuntimeProfiler {
void OptimizeNow();
+ void NotifyTick();
+
void SetUp();
void Reset();
void TearDown();
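
NotifyTick() is the sampler-to-VM hand-off: the sampler thread may not touch VM state, so it only raises a bit on the stack guard, and the JavaScript thread acts on it at its next interrupt check (with --count-based-interrupts the ticks come from code counters instead, hence the early return). A sketch of that cross-thread pattern with an atomic flag (V8's StackGuard actually patches the stack limit rather than polling a bare flag):

    #include <atomic>

    // Raised by the sampler thread; consumed by the VM thread at safe points.
    static std::atomic<bool> profiler_tick_requested(false);

    // Sampler side: lock-free and safe to call from any thread.
    void NotifyTick(bool count_based_interrupts) {
      if (count_based_interrupts) return;  // ticks arrive via counters instead
      profiler_tick_requested.store(true, std::memory_order_release);
    }

    // VM side: polled at the interrupt checks compiled into generated code.
    void HandleInterrupts() {
      if (profiler_tick_requested.exchange(false, std::memory_order_acq_rel)) {
        // ... run the runtime profiler's OptimizeNow() pass ...
      }
    }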
diff --git a/deps/v8/src/runtime.cc b/deps/v8/src/runtime.cc
index 09ca04706..19d9a3f0b 100644
--- a/deps/v8/src/runtime.cc
+++ b/deps/v8/src/runtime.cc
@@ -46,7 +46,6 @@
#include "isolate-inl.h"
#include "jsregexp.h"
#include "json-parser.h"
-#include "json-stringifier.h"
#include "liveedit.h"
#include "liveobjectlist-inl.h"
#include "misc-intrinsics.h"
@@ -783,15 +782,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetDelete) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetGetSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
- Handle<ObjectHashSet> table(ObjectHashSet::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_MapInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -851,15 +841,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_MapSet) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_MapGetSize) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
- Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
- return Smi::FromInt(table->NumberOfElements());
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_WeakMapInitialize) {
HandleScope scope(isolate);
ASSERT(args.length() == 1);
@@ -967,107 +948,104 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsInPrototypeChain) {
}
-static bool CheckAccessException(Object* callback,
- v8::AccessType access_type) {
- if (callback->IsAccessorInfo()) {
- AccessorInfo* info = AccessorInfo::cast(callback);
- return
- (access_type == v8::ACCESS_HAS &&
- (info->all_can_read() || info->all_can_write())) ||
- (access_type == v8::ACCESS_GET && info->all_can_read()) ||
- (access_type == v8::ACCESS_SET && info->all_can_write());
- }
- return false;
-}
+// Recursively traverses hidden prototypes if property is not found
+static void GetOwnPropertyImplementation(JSObject* obj,
+ String* name,
+ LookupResult* result) {
+ obj->LocalLookupRealNamedProperty(name, result);
+ if (result->IsFound()) return;
-template<class Key>
-static bool CheckGenericAccess(
- JSObject* receiver,
- JSObject* holder,
- Key key,
- v8::AccessType access_type,
- bool (Isolate::*mayAccess)(JSObject*, Key, v8::AccessType)) {
- Isolate* isolate = receiver->GetIsolate();
- for (JSObject* current = receiver;
- true;
- current = JSObject::cast(current->GetPrototype())) {
- if (current->IsAccessCheckNeeded() &&
- !(isolate->*mayAccess)(current, key, access_type)) {
- return false;
- }
- if (current == holder) break;
- }
- return true;
+ Object* proto = obj->GetPrototype();
+ if (proto->IsJSObject() &&
+ JSObject::cast(proto)->map()->is_hidden_prototype())
+ GetOwnPropertyImplementation(JSObject::cast(proto),
+ name, result);
}
-enum AccessCheckResult {
- ACCESS_FORBIDDEN,
- ACCESS_ALLOWED,
- ACCESS_ABSENT
-};
-
-
-static AccessCheckResult CheckElementAccess(
- JSObject* obj,
- uint32_t index,
- v8::AccessType access_type) {
- // TODO(1095): we should traverse hidden prototype hierarchy as well.
- if (CheckGenericAccess(
- obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
- return ACCESS_ALLOWED;
+static bool CheckAccessException(LookupResult* result,
+ v8::AccessType access_type) {
+ if (result->type() == CALLBACKS) {
+ Object* callback = result->GetCallbackObject();
+ if (callback->IsAccessorInfo()) {
+ AccessorInfo* info = AccessorInfo::cast(callback);
+ bool can_access =
+ (access_type == v8::ACCESS_HAS &&
+ (info->all_can_read() || info->all_can_write())) ||
+ (access_type == v8::ACCESS_GET && info->all_can_read()) ||
+ (access_type == v8::ACCESS_SET && info->all_can_write());
+ return can_access;
+ }
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
- return ACCESS_FORBIDDEN;
+ return false;
}
-static AccessCheckResult CheckPropertyAccess(
- JSObject* obj,
- String* name,
- v8::AccessType access_type) {
- uint32_t index;
- if (name->AsArrayIndex(&index)) {
- return CheckElementAccess(obj, index, access_type);
- }
+static bool CheckAccess(JSObject* obj,
+ String* name,
+ LookupResult* result,
+ v8::AccessType access_type) {
+ ASSERT(result->IsProperty());
- LookupResult lookup(obj->GetIsolate());
- obj->LocalLookup(name, &lookup, true);
+ JSObject* holder = result->holder();
+ JSObject* current = obj;
+ Isolate* isolate = obj->GetIsolate();
+ while (true) {
+ if (current->IsAccessCheckNeeded() &&
+ !isolate->MayNamedAccess(current, name, access_type)) {
+ // Access check callback denied the access, but some properties
+ // can have special permissions which override the callback's decision
+ // (currently see v8::AccessControl).
+ break;
+ }
+
+ if (current == holder) {
+ return true;
+ }
- if (!lookup.IsProperty()) return ACCESS_ABSENT;
- if (CheckGenericAccess<Object*>(
- obj, lookup.holder(), name, access_type, &Isolate::MayNamedAccess)) {
- return ACCESS_ALLOWED;
+ current = JSObject::cast(current->GetPrototype());
}
- // Access check callback denied the access, but some properties
- // can have special permissions which override the callback's decision
- // (currently see v8::AccessControl).
// API callbacks can have per callback access exceptions.
- switch (lookup.type()) {
- case CALLBACKS:
- if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
- return ACCESS_ALLOWED;
+ switch (result->type()) {
+ case CALLBACKS: {
+ if (CheckAccessException(result, access_type)) {
+ return true;
}
break;
- case INTERCEPTOR:
+ }
+ case INTERCEPTOR: {
// If the object has an interceptor, try real named properties.
// Overwrite the result to fetch the correct property later.
- lookup.holder()->LookupRealNamedProperty(name, &lookup);
- if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
- if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
- return ACCESS_ALLOWED;
+ holder->LookupRealNamedProperty(name, result);
+ if (result->IsProperty()) {
+ if (CheckAccessException(result, access_type)) {
+ return true;
}
}
break;
+ }
default:
break;
}
- obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
- return ACCESS_FORBIDDEN;
+ isolate->ReportFailedAccessCheck(current, access_type);
+ return false;
+}
+
+
+// TODO(1095): we should traverse hidden prototype hierarchy as well.
+static bool CheckElementAccess(JSObject* obj,
+ uint32_t index,
+ v8::AccessType access_type) {
+ if (obj->IsAccessCheckNeeded() &&
+ !obj->GetIsolate()->MayIndexedAccess(obj, index, access_type)) {
+ return false;
+ }
+
+ return true;
}
@@ -1088,44 +1066,141 @@ static MaybeObject* GetOwnProperty(Isolate* isolate,
Handle<JSObject> obj,
Handle<String> name) {
Heap* heap = isolate->heap();
- // Due to some WebKit tests, we want to make sure that we do not log
- // more than one access failure here.
- switch (CheckPropertyAccess(*obj, *name, v8::ACCESS_HAS)) {
- case ACCESS_FORBIDDEN: return heap->false_value();
- case ACCESS_ALLOWED: break;
- case ACCESS_ABSENT: return heap->undefined_value();
+ Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
+ Handle<JSArray> desc = isolate->factory()->NewJSArrayWithElements(elms);
+ LookupResult result(isolate);
+ // This could be an element.
+ uint32_t index;
+ if (name->AsArrayIndex(&index)) {
+ switch (obj->HasLocalElement(index)) {
+ case JSObject::UNDEFINED_ELEMENT:
+ return heap->undefined_value();
+
+ case JSObject::STRING_CHARACTER_ELEMENT: {
+ // Special handling of string objects according to ECMAScript 5
+ // 15.5.5.2. Note that this might be a string object with elements
+ // other than the actual string value. This is covered by the
+ // subsequent cases.
+ Handle<JSValue> js_value = Handle<JSValue>::cast(obj);
+ Handle<String> str(String::cast(js_value->value()));
+ Handle<String> substr = SubString(str, index, index + 1, NOT_TENURED);
+
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ elms->set(VALUE_INDEX, *substr);
+ elms->set(WRITABLE_INDEX, heap->false_value());
+ elms->set(ENUMERABLE_INDEX, heap->true_value());
+ elms->set(CONFIGURABLE_INDEX, heap->false_value());
+ return *desc;
+ }
+
+ case JSObject::INTERCEPTED_ELEMENT:
+ case JSObject::FAST_ELEMENT: {
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ Handle<Object> value = Object::GetElement(obj, index);
+ RETURN_IF_EMPTY_HANDLE(isolate, value);
+ elms->set(VALUE_INDEX, *value);
+ elms->set(WRITABLE_INDEX, heap->true_value());
+ elms->set(ENUMERABLE_INDEX, heap->true_value());
+ elms->set(CONFIGURABLE_INDEX, heap->true_value());
+ return *desc;
+ }
+
+ case JSObject::DICTIONARY_ELEMENT: {
+ Handle<JSObject> holder = obj;
+ if (obj->IsJSGlobalProxy()) {
+ Object* proto = obj->GetPrototype();
+ if (proto->IsNull()) return heap->undefined_value();
+ ASSERT(proto->IsJSGlobalObject());
+ holder = Handle<JSObject>(JSObject::cast(proto));
+ }
+ FixedArray* elements = FixedArray::cast(holder->elements());
+ SeededNumberDictionary* dictionary = NULL;
+ if (elements->map() == heap->non_strict_arguments_elements_map()) {
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = SeededNumberDictionary::cast(elements);
+ }
+ int entry = dictionary->FindEntry(index);
+ ASSERT(entry != SeededNumberDictionary::kNotFound);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ switch (details.type()) {
+ case CALLBACKS: {
+ // This is an accessor property with getter and/or setter.
+ AccessorPair* accessors =
+ AccessorPair::cast(dictionary->ValueAt(entry));
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
+ if (CheckElementAccess(*obj, index, v8::ACCESS_GET)) {
+ elms->set(GETTER_INDEX, accessors->GetComponent(ACCESSOR_GETTER));
+ }
+ if (CheckElementAccess(*obj, index, v8::ACCESS_SET)) {
+ elms->set(SETTER_INDEX, accessors->GetComponent(ACCESSOR_SETTER));
+ }
+ break;
+ }
+ case NORMAL: {
+ // This is a data property.
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ Handle<Object> value = Object::GetElement(obj, index);
+ ASSERT(!value.is_null());
+ elms->set(VALUE_INDEX, *value);
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!details.IsReadOnly()));
+ break;
+ }
+ default:
+ UNREACHABLE();
+ break;
+ }
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!details.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!details.IsDontDelete()));
+ return *desc;
+ }
+ }
}
- PropertyAttributes attrs = obj->GetLocalPropertyAttribute(*name);
- if (attrs == ABSENT) return heap->undefined_value();
- AccessorPair* raw_accessors = obj->GetLocalPropertyAccessorPair(*name);
- Handle<AccessorPair> accessors(raw_accessors, isolate);
+ // Use recursive implementation to also traverse hidden prototypes
+ GetOwnPropertyImplementation(*obj, *name, &result);
- Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
- elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
- elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
- elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(raw_accessors != NULL));
-
- if (raw_accessors == NULL) {
- elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
- // GetProperty does access check.
- Handle<Object> value = GetProperty(obj, name);
- if (value.is_null()) return Failure::Exception();
- elms->set(VALUE_INDEX, *value);
- } else {
- // Access checks are performed for both accessors separately.
- // When they fail, the respective field is not set in the descriptor.
+ if (!result.IsProperty()) {
+ return heap->undefined_value();
+ }
+
+ if (!CheckAccess(*obj, *name, &result, v8::ACCESS_HAS)) {
+ return heap->false_value();
+ }
+
+ elms->set(ENUMERABLE_INDEX, heap->ToBoolean(!result.IsDontEnum()));
+ elms->set(CONFIGURABLE_INDEX, heap->ToBoolean(!result.IsDontDelete()));
+
+ bool is_js_accessor = result.IsPropertyCallbacks() &&
+ (result.GetCallbackObject()->IsAccessorPair());
+
+ if (is_js_accessor) {
+ // __defineGetter__/__defineSetter__ callback.
+ elms->set(IS_ACCESSOR_INDEX, heap->true_value());
+
+ AccessorPair* accessors = AccessorPair::cast(result.GetCallbackObject());
Object* getter = accessors->GetComponent(ACCESSOR_GETTER);
- Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
- if (!getter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_GET)) {
+ if (!getter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_GET)) {
elms->set(GETTER_INDEX, getter);
}
- if (!setter->IsMap() && CheckPropertyAccess(*obj, *name, v8::ACCESS_SET)) {
+ Object* setter = accessors->GetComponent(ACCESSOR_SETTER);
+ if (!setter->IsMap() && CheckAccess(*obj, *name, &result, v8::ACCESS_SET)) {
elms->set(SETTER_INDEX, setter);
}
+ } else {
+ elms->set(IS_ACCESSOR_INDEX, heap->false_value());
+ elms->set(WRITABLE_INDEX, heap->ToBoolean(!result.IsReadOnly()));
+
+ PropertyAttributes attrs;
+ Object* value;
+ // GetProperty will check access and report any violations.
+ { MaybeObject* maybe_value = obj->GetProperty(*obj, &result, *name, &attrs);
+ if (!maybe_value->ToObject(&value)) return maybe_value;
+ }
+ elms->set(VALUE_INDEX, value);
}
- return *isolate->factory()->NewJSArrayWithElements(elms);
+ return *desc;
}
@@ -1283,7 +1358,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
bool is_var = value->IsUndefined();
bool is_const = value->IsTheHole();
bool is_function = value->IsSharedFunctionInfo();
- ASSERT(is_var + is_const + is_function == 1);
+ bool is_module = value->IsJSModule();
+ ASSERT(is_var + is_const + is_function + is_module == 1);
if (is_var || is_const) {
// Lookup the property in the global object, and don't set the
@@ -1291,7 +1367,13 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
// Do the lookup locally only, see ES5 erratum.
LookupResult lookup(isolate);
if (FLAG_es52_globals) {
- global->LocalLookup(*name, &lookup, true);
+ Object* obj = *global;
+ do {
+ JSObject::cast(obj)->LocalLookup(*name, &lookup);
+ if (lookup.IsFound()) break;
+ obj = obj->GetPrototype();
+ } while (obj->IsJSObject() &&
+ JSObject::cast(obj)->map()->is_hidden_prototype());
} else {
global->Lookup(*name, &lookup);
}
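
3.14.5's LocalLookup has no trailing "search hidden prototypes" flag (the removed call passed true for it), so the restored code walks hidden prototypes by hand: look up locally, then keep descending while the prototype is a JSObject whose map is marked hidden. The same loop shape as a standalone sketch (types hypothetical; obj must be non-null):

    struct LookupResult2 { bool found = false; };

    struct JSObj {
      JSObj* prototype = nullptr;
      bool hidden_prototype = false;  // lives on the map in V8
      bool has_own = false;
      void LocalLookup(LookupResult2* r) const { r->found = has_own; }
    };

    // Keep looking through hidden prototypes until something is found
    // or a non-hidden prototype (or the end of the chain) is reached.
    LookupResult2 LocalLookupThroughHidden(const JSObj* obj) {
      LookupResult2 result;
      const JSObj* current = obj;
      do {
        current->LocalLookup(&result);
        if (result.found) break;
        current = current->prototype;
      } while (current != nullptr && current->hidden_prototype);
      return result;
    }
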
@@ -1315,29 +1397,30 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareGlobals) {
}
LookupResult lookup(isolate);
- global->LocalLookup(*name, &lookup, true);
+ global->LocalLookup(*name, &lookup);
// Compute the property attributes. According to ECMA-262,
// the property must be non-configurable except in eval.
int attr = NONE;
bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
- if (!is_eval) {
+ if (!is_eval || is_module) {
attr |= DONT_DELETE;
}
bool is_native = DeclareGlobalsNativeFlag::decode(flags);
- if (is_const || (is_native && is_function)) {
+ if (is_const || is_module || (is_native && is_function)) {
attr |= READ_ONLY;
}
LanguageMode language_mode = DeclareGlobalsLanguageMode::decode(flags);
- if (!lookup.IsFound() || is_function) {
+ if (!lookup.IsFound() || is_function || is_module) {
// If the local property exists, check that we can reconfigure it
// as required for function declarations.
if (lookup.IsFound() && lookup.IsDontDelete()) {
if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
lookup.IsPropertyCallbacks()) {
- return ThrowRedeclarationError(isolate, "function", name);
+ return ThrowRedeclarationError(
+ isolate, is_function ? "function" : "module", name);
}
// If the existing property is not configurable, keep its attributes.
attr = lookup.GetAttributes();
@@ -1493,20 +1576,27 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeVarGlobal) {
// the whole chain of hidden prototypes to do a 'local' lookup.
Object* object = global;
LookupResult lookup(isolate);
- JSObject::cast(object)->LocalLookup(*name, &lookup, true);
- if (lookup.IsInterceptor()) {
- HandleScope handle_scope(isolate);
- PropertyAttributes intercepted =
- lookup.holder()->GetPropertyAttribute(*name);
- if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
- // Found an interceptor that's not read only.
- if (assign) {
- return lookup.holder()->SetProperty(
- &lookup, *name, args[2], attributes, strict_mode_flag);
- } else {
- return isolate->heap()->undefined_value();
+ while (object->IsJSObject() &&
+ JSObject::cast(object)->map()->is_hidden_prototype()) {
+ JSObject* raw_holder = JSObject::cast(object);
+ raw_holder->LocalLookup(*name, &lookup);
+ if (lookup.IsInterceptor()) {
+ HandleScope handle_scope(isolate);
+ Handle<JSObject> holder(raw_holder);
+ PropertyAttributes intercepted = holder->GetPropertyAttribute(*name);
+ // Update the raw pointer in case it's changed due to GC.
+ raw_holder = *holder;
+ if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
+ // Found an interceptor that's not read only.
+ if (assign) {
+ return raw_holder->SetProperty(
+ &lookup, *name, args[2], attributes, strict_mode_flag);
+ } else {
+ return isolate->heap()->undefined_value();
+ }
}
}
+ object = raw_holder->GetPrototype();
}
// Reload global in case the loop above performed a GC.
@@ -1570,7 +1660,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstGlobal) {
// Strict mode handling not needed (const is disallowed in strict mode).
if (lookup.IsField()) {
FixedArray* properties = global->properties();
- int index = lookup.GetFieldIndex().field_index();
+ int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
properties->set(index, *value);
}
@@ -1660,7 +1750,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_InitializeConstContextSlot) {
if (lookup.IsField()) {
FixedArray* properties = object->properties();
- int index = lookup.GetFieldIndex().field_index();
+ int index = lookup.GetFieldIndex();
if (properties->get(index)->IsTheHole()) {
properties->set(index, *value);
}
@@ -1791,8 +1881,9 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_RegExpInitializeObject) {
JSRegExp::kIgnoreCaseFieldIndex, ignoreCase, SKIP_WRITE_BARRIER);
regexp->InObjectPropertyAtPut(
JSRegExp::kMultilineFieldIndex, multiline, SKIP_WRITE_BARRIER);
- regexp->InObjectPropertyAtPut(
- JSRegExp::kLastIndexFieldIndex, Smi::FromInt(0), SKIP_WRITE_BARRIER);
+ regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
+ Smi::FromInt(0),
+ SKIP_WRITE_BARRIER); // It's a Smi.
return regexp;
}
@@ -2147,7 +2238,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SetCode) {
// target function to undefined. SetCode is only used for built-in
// constructors like String, Array, and Object, and some web code
// doesn't like seeing source code for constructors.
- target_shared->ReplaceCode(source_shared->code());
+ target_shared->set_code(source_shared->code());
target_shared->set_scope_info(source_shared->scope_info());
target_shared->set_length(source_shared->length());
target_shared->set_formal_parameter_count(
@@ -2351,7 +2442,7 @@ class ReplacementStringBuilder {
array_builder_(heap->isolate(), estimated_part_count),
subject_(subject),
character_count_(0),
- is_ascii_(subject->IsOneByteRepresentation()) {
+ is_ascii_(subject->IsAsciiRepresentation()) {
// Require a non-zero initial size. Ensures that doubling the size to
// extend the array will work.
ASSERT(estimated_part_count > 0);
@@ -2391,7 +2482,7 @@ class ReplacementStringBuilder {
int length = string->length();
ASSERT(length > 0);
AddElement(*string);
- if (!string->IsOneByteRepresentation()) {
+ if (!string->IsAsciiRepresentation()) {
is_ascii_ = false;
}
IncrementCharacterCount(length);
@@ -2405,7 +2496,7 @@ class ReplacementStringBuilder {
Handle<String> joined_string;
if (is_ascii_) {
- Handle<SeqOneByteString> seq = NewRawOneByteString(character_count_);
+ Handle<SeqAsciiString> seq = NewRawAsciiString(character_count_);
AssertNoAllocation no_alloc;
char* char_buffer = seq->GetChars();
StringBuilderConcatHelper(*subject_,
@@ -2436,8 +2527,8 @@ class ReplacementStringBuilder {
}
private:
- Handle<SeqOneByteString> NewRawOneByteString(int length) {
- return heap_->isolate()->factory()->NewRawOneByteString(length);
+ Handle<SeqAsciiString> NewRawAsciiString(int length) {
+ return heap_->isolate()->factory()->NewRawAsciiString(length);
}
@@ -2762,23 +2853,6 @@ void FindAsciiStringIndices(Vector<const char> subject,
}
-void FindTwoByteStringIndices(const Vector<const uc16> subject,
- uc16 pattern,
- ZoneList<int>* indices,
- unsigned int limit,
- Zone* zone) {
- ASSERT(limit > 0);
- const uc16* subject_start = subject.start();
- const uc16* subject_end = subject_start + subject.length();
- for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
- if (*pos == pattern) {
- indices->Add(static_cast<int>(pos - subject_start), zone);
- limit--;
- }
- }
-}
-
-
template <typename SubjectChar, typename PatternChar>
void FindStringIndices(Isolate* isolate,
Vector<const SubjectChar> subject,
@@ -2843,37 +2917,19 @@ void FindStringIndicesDispatch(Isolate* isolate,
} else {
Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
if (pattern_content.IsAscii()) {
- Vector<const char> pattern_vector = pattern_content.ToAsciiVector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_content.ToAsciiVector(),
+ indices,
+ limit,
+ zone);
} else {
- Vector<const uc16> pattern_vector = pattern_content.ToUC16Vector();
- if (pattern_vector.length() == 1) {
- FindTwoByteStringIndices(subject_vector,
- pattern_vector[0],
- indices,
- limit,
- zone);
- } else {
- FindStringIndices(isolate,
- subject_vector,
- pattern_vector,
- indices,
- limit,
- zone);
- }
+ FindStringIndices(isolate,
+ subject_vector,
+ pattern_content.ToUC16Vector(),
+ indices,
+ limit,
+ zone);
}
}
}
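
The removed FindTwoByteStringIndices was a 3.15 fast path that special-cased single-character patterns before falling back to the generic searcher; after the downgrade, every pattern goes through FindStringIndices. For reference, a standalone version of such a single-code-unit scan is just a bounded linear pass:

    #include <cstdint>
    #include <vector>

    // Collect the offsets of every occurrence of 'pattern' in
    // 'subject', stopping after 'limit' matches.
    std::vector<int> FindCharIndices(const uint16_t* subject, int length,
                                     uint16_t pattern, unsigned limit) {
      std::vector<int> indices;
      for (int i = 0; i < length && limit > 0; i++) {
        if (subject[i] == pattern) {
          indices.push_back(i);
          limit--;
        }
      }
      return indices;
    }
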
@@ -2904,9 +2960,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
isolate, *subject, pattern, &indices, 0xffffffff, zone);
int matches = indices.length();
- if (matches == 0) {
- return isolate->heap()->undefined_value();
- }
+ if (matches == 0) return *subject;
// Detect integer overflow.
int64_t result_len_64 =
@@ -2923,7 +2977,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceAtomRegExpWithString(
Handle<ResultSeqString> result;
if (ResultSeqString::kHasAsciiEncoding) {
result = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(result_len));
+ isolate->factory()->NewRawAsciiString(result_len));
} else {
result = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(result_len));
@@ -2992,7 +3046,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
regexp->TypeTag() == JSRegExp::ATOM &&
simple_replace) {
if (subject->HasOnlyAsciiChars() && replacement->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqOneByteString>(
+ return StringReplaceAtomRegExpWithString<SeqAsciiString>(
isolate, subject, regexp, replacement, last_match_info);
} else {
return StringReplaceAtomRegExpWithString<SeqTwoByteString>(
@@ -3006,7 +3060,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithString(
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
if (global_cache.HasException()) return Failure::Exception();
- return isolate->heap()->undefined_value();
+ return *subject;
}
// Guessing the number of parts that the final result string is built
@@ -3080,9 +3134,9 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
// Shortcut for simple non-regexp global replacements
if (is_global &&
regexp->TypeTag() == JSRegExp::ATOM) {
- Handle<String> empty_string = isolate->factory()->empty_string();
+ Handle<String> empty_string(HEAP->empty_string());
if (subject->HasOnlyAsciiChars()) {
- return StringReplaceAtomRegExpWithString<SeqOneByteString>(
+ return StringReplaceAtomRegExpWithString<SeqAsciiString>(
isolate,
subject,
regexp,
@@ -3104,7 +3158,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
int32_t* current_match = global_cache.FetchNext();
if (current_match == NULL) {
if (global_cache.HasException()) return Failure::Exception();
- return isolate->heap()->undefined_value();
+ return *subject;
}
int start = current_match[0];
@@ -3118,7 +3172,7 @@ MUST_USE_RESULT static MaybeObject* StringReplaceRegExpWithEmptyString(
Handle<ResultSeqString> answer;
if (ResultSeqString::kHasAsciiEncoding) {
answer = Handle<ResultSeqString>::cast(
- isolate->factory()->NewRawOneByteString(new_length));
+ isolate->factory()->NewRawAsciiString(new_length));
} else {
answer = Handle<ResultSeqString>::cast(
isolate->factory()->NewRawTwoByteString(new_length));
@@ -3210,7 +3264,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringReplaceRegExpWithString) {
if (replacement->length() == 0) {
if (subject->HasOnlyAsciiChars()) {
- return StringReplaceRegExpWithEmptyString<SeqOneByteString>(
+ return StringReplaceRegExpWithEmptyString<SeqAsciiString>(
isolate, subject, regexp, last_match_info);
} else {
return StringReplaceRegExpWithEmptyString<SeqTwoByteString>(
@@ -3795,7 +3849,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToRadixString) {
}
char* str = DoubleToRadixCString(value, radix);
MaybeObject* result =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return result;
}
@@ -3806,12 +3860,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToFixed) {
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ if (isnan(value)) {
+ return *isolate->factory()->nan_symbol();
+ }
+ if (isinf(value)) {
+ if (value < 0) {
+ return *isolate->factory()->minus_infinity_symbol();
+ }
+ return *isolate->factory()->infinity_symbol();
+ }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 0);
char* str = DoubleToFixedCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return res;
}
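
This hunk and the two that follow (toExponential, toPrecision) restore explicit non-finite guards, so the DoubleTo*CString converters only ever see finite doubles and NaN/±Infinity map to their symbolic names. A self-contained sketch of the same guard in standard C++ (not V8's allocator-aware version):

    #include <cmath>
    #include <cstdio>
    #include <string>

    std::string NumberToFixed(double value, int digits) {
      // Mirror the early returns above: non-finite values become their
      // symbolic spellings before any formatting runs.
      if (std::isnan(value)) return "NaN";
      if (std::isinf(value)) return value < 0 ? "-Infinity" : "Infinity";
      char buf[512];
      std::snprintf(buf, sizeof buf, "%.*f", digits, value);
      return buf;
    }
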
@@ -3822,12 +3885,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToExponential) {
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ if (isnan(value)) {
+ return *isolate->factory()->nan_symbol();
+ }
+ if (isinf(value)) {
+ if (value < 0) {
+ return *isolate->factory()->minus_infinity_symbol();
+ }
+ return *isolate->factory()->infinity_symbol();
+ }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= -1 && f <= 20);
char* str = DoubleToExponentialCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return res;
}
@@ -3838,12 +3910,21 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_NumberToPrecision) {
ASSERT(args.length() == 2);
CONVERT_DOUBLE_ARG_CHECKED(value, 0);
+ if (isnan(value)) {
+ return *isolate->factory()->nan_symbol();
+ }
+ if (isinf(value)) {
+ if (value < 0) {
+ return *isolate->factory()->minus_infinity_symbol();
+ }
+ return *isolate->factory()->infinity_symbol();
+ }
CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
int f = FastD2IChecked(f_number);
RUNTIME_ASSERT(f >= 1 && f <= 21);
char* str = DoubleToPrecisionCString(value, f);
MaybeObject* res =
- isolate->heap()->AllocateStringFromOneByte(CStrVector(str));
+ isolate->heap()->AllocateStringFromAscii(CStrVector(str));
DeleteArray(str);
return res;
}
@@ -3976,7 +4057,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
LookupResult result(isolate);
receiver->LocalLookup(key, &result);
if (result.IsField()) {
- int offset = result.GetFieldIndex().field_index();
+ int offset = result.GetFieldIndex();
keyed_lookup_cache->Update(receiver_map, key, offset);
return receiver->FastPropertyAt(offset);
}
@@ -4002,7 +4083,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
// become FAST_DOUBLE_ELEMENTS.
Handle<JSObject> js_object(args.at<JSObject>(0));
ElementsKind elements_kind = js_object->GetElementsKind();
- if (IsFastDoubleElementsKind(elements_kind)) {
+ if (IsFastElementsKind(elements_kind) &&
+ !IsFastObjectElementsKind(elements_kind)) {
FixedArrayBase* elements = js_object->elements();
if (args.at<Smi>(1)->value() >= elements->length()) {
if (IsFastHoleyElementsKind(elements_kind)) {
@@ -4015,9 +4097,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_KeyedGetProperty) {
isolate);
if (maybe_object->IsFailure()) return maybe_object;
}
- } else {
- ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
- !IsFastElementsKind(elements_kind));
}
}
} else if (args[0]->IsString() && args[1]->IsSmi()) {
@@ -4140,34 +4219,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_DefineOrRedefineDataProperty) {
}
-// Return property without being observable by accessors or interceptors.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetDataProperty) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
- CONVERT_ARG_HANDLE_CHECKED(String, key, 1);
- LookupResult lookup(isolate);
- object->LookupRealNamedProperty(*key, &lookup);
- if (!lookup.IsFound()) return isolate->heap()->undefined_value();
- switch (lookup.type()) {
- case NORMAL:
- return lookup.holder()->GetNormalizedProperty(&lookup);
- case FIELD:
- return lookup.holder()->FastPropertyAt(
- lookup.GetFieldIndex().field_index());
- case CONSTANT_FUNCTION:
- return lookup.GetConstantFunction();
- case CALLBACKS:
- case HANDLER:
- case INTERCEPTOR:
- case TRANSITION:
- return isolate->heap()->undefined_value();
- case NONEXISTENT:
- UNREACHABLE();
- }
- return isolate->heap()->undefined_value();
-}
-
-
MaybeObject* Runtime::SetObjectProperty(Isolate* isolate,
Handle<Object> object,
Handle<Object> key,
@@ -4627,6 +4678,41 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsPropertyEnumerable) {
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_ARG_CHECKED(String, key, 1);
+ uint32_t index;
+ if (key->AsArrayIndex(&index)) {
+ JSObject::LocalElementType type = object->HasLocalElement(index);
+ switch (type) {
+ case JSObject::UNDEFINED_ELEMENT:
+ case JSObject::STRING_CHARACTER_ELEMENT:
+ return isolate->heap()->false_value();
+ case JSObject::INTERCEPTED_ELEMENT:
+ case JSObject::FAST_ELEMENT:
+ return isolate->heap()->true_value();
+ case JSObject::DICTIONARY_ELEMENT: {
+ if (object->IsJSGlobalProxy()) {
+ Object* proto = object->GetPrototype();
+ if (proto->IsNull()) {
+ return isolate->heap()->false_value();
+ }
+ ASSERT(proto->IsJSGlobalObject());
+ object = JSObject::cast(proto);
+ }
+ FixedArray* elements = FixedArray::cast(object->elements());
+ SeededNumberDictionary* dictionary = NULL;
+ if (elements->map() ==
+ isolate->heap()->non_strict_arguments_elements_map()) {
+ dictionary = SeededNumberDictionary::cast(elements->get(1));
+ } else {
+ dictionary = SeededNumberDictionary::cast(elements);
+ }
+ int entry = dictionary->FindEntry(index);
+ ASSERT(entry != SeededNumberDictionary::kNotFound);
+ PropertyDetails details = dictionary->DetailsAt(entry);
+ return isolate->heap()->ToBoolean(!details.IsDontEnum());
+ }
+ }
+ }
+
PropertyAttributes att = object->GetLocalPropertyAttribute(key);
return isolate->heap()->ToBoolean(att != ABSENT && (att & DONT_ENUM) == 0);
}
@@ -5026,10 +5112,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
// Fast case: short integer or some sorts of junk values.
int len = subject->length();
- if (subject->IsSeqOneByteString()) {
+ if (subject->IsSeqAsciiString()) {
if (len == 0) return Smi::FromInt(0);
- char const* data = SeqOneByteString::cast(subject)->GetChars();
+ char const* data = SeqAsciiString::cast(subject)->GetChars();
bool minus = (data[0] == '-');
int start_pos = (minus ? 1 : 0);
@@ -5073,22 +5159,46 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToNumber) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_NewString) {
- CONVERT_SMI_ARG_CHECKED(length, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
- if (length == 0) return isolate->heap()->empty_string();
- if (is_one_byte) {
- return isolate->heap()->AllocateRawOneByteString(length);
- } else {
- return isolate->heap()->AllocateRawTwoByteString(length);
+RUNTIME_FUNCTION(MaybeObject*, Runtime_StringFromCharCodeArray) {
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+
+ CONVERT_ARG_CHECKED(JSArray, codes, 0);
+ int length = Smi::cast(codes->length())->value();
+
+ // Check if the string can be ASCII.
+ int i;
+ for (i = 0; i < length; i++) {
+ Object* element;
+ { MaybeObject* maybe_element = codes->GetElement(i);
+ // We probably can't get an exception here, but we check anyway to
+ // enforce input checking in runtime calls.
+ if (!maybe_element->ToObject(&element)) return maybe_element;
+ }
+ CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+ if ((chr & 0xffff) > String::kMaxAsciiCharCode)
+ break;
}
-}
+ MaybeObject* maybe_object = NULL;
+ if (i == length) { // The string is ASCII.
+ maybe_object = isolate->heap()->AllocateRawAsciiString(length);
+ } else { // The string is not ASCII.
+ maybe_object = isolate->heap()->AllocateRawTwoByteString(length);
+ }
-RUNTIME_FUNCTION(MaybeObject*, Runtime_TruncateString) {
- CONVERT_ARG_CHECKED(SeqString, string, 0);
- CONVERT_SMI_ARG_CHECKED(new_length, 1);
- return string->Truncate(new_length);
+ Object* object = NULL;
+ if (!maybe_object->ToObject(&object)) return maybe_object;
+ String* result = String::cast(object);
+ for (int i = 0; i < length; i++) {
+ Object* element;
+ { MaybeObject* maybe_element = codes->GetElement(i);
+ if (!maybe_element->ToObject(&element)) return maybe_element;
+ }
+ CONVERT_NUMBER_CHECKED(int, chr, Int32, element);
+ result->Set(i, chr & 0xffff);
+ }
+ return result;
}
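
Runtime_StringFromCharCodeArray uses a two-pass scheme: scan the char codes once to decide whether they all fit the one-byte (ASCII) representation, allocate the matching string type, then fill it. The same narrow-vs-wide split with standard containers, assuming 0x7f as the ASCII ceiling (V8's String::kMaxAsciiCharCode):

    #include <cstdint>
    #include <string>
    #include <variant>
    #include <vector>

    // Returns a narrow string when every code unit is ASCII, otherwise
    // a two-byte string -- the same split the runtime function makes.
    std::variant<std::string, std::u16string> FromCharCodes(
        const std::vector<uint16_t>& codes) {
      bool ascii = true;
      for (uint16_t c : codes) {
        if (c > 0x7f) { ascii = false; break; }
      }
      if (ascii) {
        return std::string(codes.begin(), codes.end());
      }
      return std::u16string(codes.begin(), codes.end());
    }
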
@@ -5166,7 +5276,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIEscape) {
}
Object* o;
{ MaybeObject* maybe_o =
- isolate->heap()->AllocateRawOneByteString(escaped_length);
+ isolate->heap()->AllocateRawAsciiString(escaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
String* destination = String::cast(o);
@@ -5274,7 +5384,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_URIUnescape) {
Object* o;
{ MaybeObject* maybe_o =
ascii ?
- isolate->heap()->AllocateRawOneByteString(unescaped_length) :
+ isolate->heap()->AllocateRawAsciiString(unescaped_length) :
isolate->heap()->AllocateRawTwoByteString(unescaped_length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
@@ -5372,8 +5482,8 @@ MaybeObject* AllocateRawString<SeqTwoByteString>(Isolate* isolate, int length) {
template <>
-MaybeObject* AllocateRawString<SeqOneByteString>(Isolate* isolate, int length) {
- return isolate->heap()->AllocateRawOneByteString(length);
+MaybeObject* AllocateRawString<SeqAsciiString>(Isolate* isolate, int length) {
+ return isolate->heap()->AllocateRawAsciiString(length);
}
@@ -5524,7 +5634,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONString) {
return QuoteJsonString<uc16, SeqTwoByteString, false>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqOneByteString, false>(isolate,
+ return QuoteJsonString<char, SeqAsciiString, false>(isolate,
flat.ToAsciiVector());
}
}
@@ -5547,7 +5657,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringComma) {
return QuoteJsonString<uc16, SeqTwoByteString, true>(isolate,
flat.ToUC16Vector());
} else {
- return QuoteJsonString<char, SeqOneByteString, true>(isolate,
+ return QuoteJsonString<char, SeqAsciiString, true>(isolate,
flat.ToAsciiVector());
}
}
@@ -5639,7 +5749,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
}
if (ascii) {
- return QuoteJsonStringArray<char, SeqOneByteString>(isolate,
+ return QuoteJsonStringArray<char, SeqAsciiString>(isolate,
elements,
worst_case_length);
} else {
@@ -5650,14 +5760,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_QuoteJSONStringArray) {
}
-RUNTIME_FUNCTION(MaybeObject*, Runtime_BasicJSONStringify) {
- ASSERT(args.length() == 1);
- HandleScope scope(isolate);
- BasicJsonStringifier stringifier(isolate);
- return stringifier.Stringify(Handle<Object>(args[0]));
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_StringParseInt) {
NoHandleAllocation ha;
@@ -5704,8 +5806,8 @@ MUST_USE_RESULT static MaybeObject* ConvertCaseHelper(
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
Object* o;
- { MaybeObject* maybe_o = s->IsOneByteRepresentation()
- ? isolate->heap()->AllocateRawOneByteString(length)
+ { MaybeObject* maybe_o = s->IsAsciiRepresentation()
+ ? isolate->heap()->AllocateRawAsciiString(length)
: isolate->heap()->AllocateRawTwoByteString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
@@ -5936,14 +6038,14 @@ MUST_USE_RESULT static MaybeObject* ConvertCase(
// character is also ASCII. This is currently the case, but it
// might break in the future if we implement more context and locale
// dependent upper/lower conversions.
- if (s->IsSeqOneByteString()) {
+ if (s->IsSeqAsciiString()) {
Object* o;
- { MaybeObject* maybe_o = isolate->heap()->AllocateRawOneByteString(length);
+ { MaybeObject* maybe_o = isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_o->ToObject(&o)) return maybe_o;
}
- SeqOneByteString* result = SeqOneByteString::cast(o);
+ SeqAsciiString* result = SeqAsciiString::cast(o);
bool has_changed_character = ConvertTraits::AsciiConverter::Convert(
- result->GetChars(), SeqOneByteString::cast(s)->GetChars(), length);
+ result->GetChars(), SeqAsciiString::cast(s)->GetChars(), length);
return has_changed_character ? result : s;
}
@@ -6145,7 +6247,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringToArray) {
Handle<FixedArray> elements;
int position = 0;
- if (s->IsFlat() && s->IsOneByteRepresentation()) {
+ if (s->IsFlat() && s->IsAsciiRepresentation()) {
// Try using cached chars where possible.
Object* obj;
{ MaybeObject* maybe_obj =
@@ -6518,10 +6620,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_StringBuilderConcat) {
if (ascii) {
{ MaybeObject* maybe_object =
- isolate->heap()->AllocateRawOneByteString(length);
+ isolate->heap()->AllocateRawAsciiString(length);
if (!maybe_object->ToObject(&object)) return maybe_object;
}
- SeqOneByteString* answer = SeqOneByteString::cast(object);
+ SeqAsciiString* answer = SeqAsciiString::cast(object);
StringBuilderConcatHelper(special,
answer->GetChars(),
fixed_array,
@@ -6680,10 +6782,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
// Find total length of join result.
int string_length = 0;
- bool is_ascii = separator->IsOneByteRepresentation();
+ bool is_ascii = separator->IsAsciiRepresentation();
int max_string_length;
if (is_ascii) {
- max_string_length = SeqOneByteString::kMaxLength;
+ max_string_length = SeqAsciiString::kMaxLength;
} else {
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6697,7 +6799,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
RUNTIME_ASSERT(elements->get(i + 1)->IsString());
String* string = String::cast(elements->get(i + 1));
int length = string->length();
- if (is_ascii && !string->IsOneByteRepresentation()) {
+ if (is_ascii && !string->IsAsciiRepresentation()) {
is_ascii = false;
max_string_length = SeqTwoByteString::kMaxLength;
}
@@ -6733,10 +6835,10 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_SparseJoinWithSeparator) {
if (is_ascii) {
MaybeObject* result_allocation =
- isolate->heap()->AllocateRawOneByteString(string_length);
+ isolate->heap()->AllocateRawAsciiString(string_length);
if (result_allocation->IsFailure()) return result_allocation;
- SeqOneByteString* result_string =
- SeqOneByteString::cast(result_allocation->ToObjectUnchecked());
+ SeqAsciiString* result_string =
+ SeqAsciiString::cast(result_allocation->ToObjectUnchecked());
JoinSparseArrayWithSeparator<char>(elements,
elements_length,
array_length,
@@ -7139,8 +7241,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_Math_exp) {
isolate->counters()->math_exp()->Increment();
CONVERT_DOUBLE_ARG_CHECKED(x, 0);
- lazily_initialize_fast_exp();
- return isolate->heap()->NumberFromDouble(fast_exp(x));
+ return isolate->transcendental_cache()->Get(TranscendentalCache::EXP, x);
}
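
The downgraded Math.exp path goes back through the TranscendentalCache instead of the removed fast_exp path, i.e. results are memoized per input. A hypothetical sketch of such a cache keyed by the double's bit pattern; V8's real cache is a small fixed-size table, and a hash map is used here only for brevity:

    #include <cmath>
    #include <cstdint>
    #include <cstring>
    #include <unordered_map>

    class ExpCache {
     public:
      double Get(double x) {
        uint64_t bits;
        std::memcpy(&bits, &x, sizeof bits);  // key on exact bit pattern
        auto it = cache_.find(bits);
        if (it != cache_.end()) return it->second;
        double result = std::exp(x);
        cache_.emplace(bits, result);
        return result;
      }

     private:
      std::unordered_map<uint64_t, double> cache_;
    };
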
@@ -7832,34 +7933,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParallelRecompile) {
HandleScope handle_scope(isolate);
ASSERT(FLAG_parallel_recompilation);
Compiler::RecompileParallel(args.at<JSFunction>(0));
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ForceParallelRecompile) {
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
- return isolate->Throw(
- *isolate->factory()->LookupAsciiSymbol("Recompile queue is full."));
- }
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- fun->ReplaceCode(isolate->builtins()->builtin(Builtins::kParallelRecompile));
- Compiler::RecompileParallel(fun);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_InstallRecompiledCode) {
- if (!V8::UseCrankshaft()) return isolate->heap()->undefined_value();
- HandleScope handle_scope(isolate);
- ASSERT(FLAG_parallel_recompilation && FLAG_manual_parallel_recompilation);
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- OptimizingCompilerThread* opt_thread = isolate->optimizing_compiler_thread();
- Handle<SharedFunctionInfo> shared(fun->shared());
- while (*opt_thread->InstallNextOptimizedFunction() != *shared) { }
- return isolate->heap()->undefined_value();
+ return *isolate->factory()->undefined_value();
}
@@ -8134,8 +8208,15 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CompileForOnStackReplacement) {
function->PrintName();
PrintF("]\n");
}
- InterruptStub interrupt_stub;
- Handle<Code> check_code = interrupt_stub.GetCode();
+ Handle<Code> check_code;
+ if (FLAG_count_based_interrupts) {
+ InterruptStub interrupt_stub;
+ check_code = interrupt_stub.GetCode();
+ } else // NOLINT
+ { // NOLINT
+ StackCheckStub check_stub;
+ check_code = check_stub.GetCode();
+ }
Handle<Code> replacement_code = isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(*unoptimized,
*check_code,
@@ -8397,89 +8478,20 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_IsJSModule) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushModuleContext) {
- ASSERT(args.length() == 2);
- CONVERT_SMI_ARG_CHECKED(index, 0);
-
- if (!args[1]->IsScopeInfo()) {
- // Module already initialized. Find hosting context and retrieve context.
- Context* host = Context::cast(isolate->context())->global_context();
- Context* context = Context::cast(host->get(index));
- ASSERT(context->previous() == isolate->context());
- isolate->set_context(context);
- return context;
- }
-
- CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+ NoHandleAllocation ha;
+ ASSERT(args.length() == 1);
+ CONVERT_ARG_HANDLE_CHECKED(JSModule, instance, 0);
- // Allocate module context.
- HandleScope scope(isolate);
- Factory* factory = isolate->factory();
- Handle<Context> context = factory->NewModuleContext(scope_info);
- Handle<JSModule> module = factory->NewJSModule(context, scope_info);
- context->set_module(*module);
+ Context* context = Context::cast(instance->context());
Context* previous = isolate->context();
+ ASSERT(context->IsModuleContext());
+ // Initialize the context links.
context->set_previous(previous);
context->set_closure(previous->closure());
context->set_global_object(previous->global_object());
- isolate->set_context(*context);
-
- // Find hosting scope and initialize internal variable holding module there.
- previous->global_context()->set(index, *context);
-
- return *context;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_DeclareModules) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 1);
- CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
- Context* host_context = isolate->context();
-
- for (int i = 0; i < descriptions->length(); ++i) {
- Handle<ModuleInfo> description(ModuleInfo::cast(descriptions->get(i)));
- int host_index = description->host_index();
- Handle<Context> context(Context::cast(host_context->get(host_index)));
- Handle<JSModule> module(context->module());
-
- for (int j = 0; j < description->length(); ++j) {
- Handle<String> name(description->name(j));
- VariableMode mode = description->mode(j);
- int index = description->index(j);
- switch (mode) {
- case VAR:
- case LET:
- case CONST:
- case CONST_HARMONY: {
- PropertyAttributes attr =
- IsImmutableVariableMode(mode) ? FROZEN : SEALED;
- Handle<AccessorInfo> info =
- Accessors::MakeModuleExport(name, index, attr);
- Handle<Object> result = SetAccessor(module, info);
- ASSERT(!(result.is_null() || result->IsUndefined()));
- USE(result);
- break;
- }
- case MODULE: {
- Object* referenced_context = Context::cast(host_context)->get(index);
- Handle<JSModule> value(Context::cast(referenced_context)->module());
- JSReceiver::SetProperty(module, name, value, FROZEN, kStrictMode);
- break;
- }
- case INTERNAL:
- case TEMPORARY:
- case DYNAMIC:
- case DYNAMIC_GLOBAL:
- case DYNAMIC_LOCAL:
- UNREACHABLE();
- }
- }
-
- JSObject::PreventExtensions(module);
- }
+ isolate->set_context(context);
- ASSERT(!isolate->has_pending_exception());
- return isolate->heap()->undefined_value();
+ return context;
}
@@ -8997,7 +9009,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ParseJson) {
source = Handle<String>(source->TryFlattenGetString());
// Optimized fast case where we only have ASCII characters.
Handle<Object> result;
- if (source->IsSeqOneByteString()) {
+ if (source->IsSeqAsciiString()) {
result = JsonParser<true>::Parse(source, zone);
} else {
result = JsonParser<false>::Parse(source, zone);
@@ -9167,7 +9179,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_AllocateInNewSpace) {
RUNTIME_FUNCTION(MaybeObject*, Runtime_PushIfAbsent) {
ASSERT(args.length() == 2);
CONVERT_ARG_CHECKED(JSArray, array, 0);
- CONVERT_ARG_CHECKED(JSReceiver, element, 1);
+ CONVERT_ARG_CHECKED(JSObject, element, 1);
RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
int length = Smi::cast(array->length())->value();
FixedArray* elements = FixedArray::cast(array->elements());
@@ -9236,7 +9248,7 @@ class ArrayConcatVisitor {
clear_storage();
set_storage(*result);
}
- }
+ }
void increase_index_offset(uint32_t delta) {
if (JSObject::kMaxElementCount - index_offset_ < delta) {
@@ -9327,22 +9339,10 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
break;
}
case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS: {
- // Fast elements can't have lengths that are not representable by
- // a 32-bit signed integer.
- ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
- int fast_length = static_cast<int>(length);
- if (array->elements()->IsFixedArray()) {
- ASSERT(FixedArray::cast(array->elements())->length() == 0);
- break;
- }
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(array->elements()));
- for (int i = 0; i < fast_length; i++) {
- if (!elements->is_the_hole(i)) element_count++;
- }
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ // TODO(1810): Decide if it's worthwhile to implement this.
+ UNREACHABLE();
break;
- }
case DICTIONARY_ELEMENTS: {
Handle<SeededNumberDictionary> dictionary(
SeededNumberDictionary::cast(array->elements()));
@@ -9585,27 +9585,8 @@ static bool IterateElements(Isolate* isolate,
}
case FAST_HOLEY_DOUBLE_ELEMENTS:
case FAST_DOUBLE_ELEMENTS: {
- // Run through the elements FixedArray and use HasElement and GetElement
- // to check the prototype for missing elements.
- Handle<FixedDoubleArray> elements(
- FixedDoubleArray::cast(receiver->elements()));
- int fast_length = static_cast<int>(length);
- ASSERT(fast_length <= elements->length());
- for (int j = 0; j < fast_length; j++) {
- HandleScope loop_scope(isolate);
- if (!elements->is_the_hole(j)) {
- double double_value = elements->get_scalar(j);
- Handle<Object> element_value =
- isolate->factory()->NewNumber(double_value);
- visitor->visit(j, element_value);
- } else if (receiver->HasElement(j)) {
- // Call GetElement on receiver, not its prototype, or getters won't
- // have the correct receiver.
- Handle<Object> element_value = Object::GetElement(receiver, j);
- RETURN_IF_EMPTY_HANDLE_VALUE(isolate, element_value, false);
- visitor->visit(j, element_value);
- }
- }
+ // TODO(1810): Decide if it's worthwhile to implement this.
+ UNREACHABLE();
break;
}
case DICTIONARY_ELEMENTS: {
@@ -9708,51 +9689,48 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
// that mutate other arguments (but will otherwise be precise).
// The number of elements is precise if there are no inherited elements.
- ElementsKind kind = FAST_SMI_ELEMENTS;
-
uint32_t estimate_result_length = 0;
uint32_t estimate_nof_elements = 0;
- for (int i = 0; i < argument_count; i++) {
- HandleScope loop_scope;
- Handle<Object> obj(elements->get(i));
- uint32_t length_estimate;
- uint32_t element_estimate;
- if (obj->IsJSArray()) {
- Handle<JSArray> array(Handle<JSArray>::cast(obj));
- length_estimate = static_cast<uint32_t>(array->length()->Number());
- if (length_estimate != 0) {
- ElementsKind array_kind =
- GetPackedElementsKind(array->map()->elements_kind());
- if (IsMoreGeneralElementsKindTransition(kind, array_kind)) {
- kind = array_kind;
- }
- }
- element_estimate = EstimateElementCount(array);
- } else {
- if (obj->IsHeapObject()) {
- if (obj->IsNumber()) {
- if (IsMoreGeneralElementsKindTransition(kind, FAST_DOUBLE_ELEMENTS)) {
- kind = FAST_DOUBLE_ELEMENTS;
+ {
+ for (int i = 0; i < argument_count; i++) {
+ HandleScope loop_scope;
+ Handle<Object> obj(elements->get(i));
+ uint32_t length_estimate;
+ uint32_t element_estimate;
+ if (obj->IsJSArray()) {
+ Handle<JSArray> array(Handle<JSArray>::cast(obj));
+ // TODO(1810): Find out if it's worthwhile to properly support
+ // arbitrary ElementsKinds. For now, pessimistically transition to
+ // FAST_*_ELEMENTS.
+ if (array->HasFastDoubleElements()) {
+ ElementsKind to_kind = FAST_ELEMENTS;
+ if (array->HasFastHoleyElements()) {
+ to_kind = FAST_HOLEY_ELEMENTS;
}
- } else if (IsMoreGeneralElementsKindTransition(kind, FAST_ELEMENTS)) {
- kind = FAST_ELEMENTS;
+ array = Handle<JSArray>::cast(
+ JSObject::TransitionElementsKind(array, to_kind));
}
+ length_estimate =
+ static_cast<uint32_t>(array->length()->Number());
+ element_estimate =
+ EstimateElementCount(array);
+ } else {
+ length_estimate = 1;
+ element_estimate = 1;
+ }
+ // Avoid overflows by capping at kMaxElementCount.
+ if (JSObject::kMaxElementCount - estimate_result_length <
+ length_estimate) {
+ estimate_result_length = JSObject::kMaxElementCount;
+ } else {
+ estimate_result_length += length_estimate;
+ }
+ if (JSObject::kMaxElementCount - estimate_nof_elements <
+ element_estimate) {
+ estimate_nof_elements = JSObject::kMaxElementCount;
+ } else {
+ estimate_nof_elements += element_estimate;
}
- length_estimate = 1;
- element_estimate = 1;
- }
- // Avoid overflows by capping at kMaxElementCount.
- if (JSObject::kMaxElementCount - estimate_result_length <
- length_estimate) {
- estimate_result_length = JSObject::kMaxElementCount;
- } else {
- estimate_result_length += length_estimate;
- }
- if (JSObject::kMaxElementCount - estimate_nof_elements <
- element_estimate) {
- estimate_nof_elements = JSObject::kMaxElementCount;
- } else {
- estimate_nof_elements += element_estimate;
}
}
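
The estimation loop above clamps both running totals at JSObject::kMaxElementCount instead of letting uint32_t addition wrap; "max - a < b" is the standard overflow-safe test. As a standalone helper (hypothetical name):

    #include <cstdint>

    // a + b saturating at 'cap' (requires a <= cap); 'cap - a < b'
    // detects overflow without computing an out-of-range sum.
    uint32_t SaturatingAdd(uint32_t a, uint32_t b, uint32_t cap) {
      return (cap - a < b) ? cap : a + b;
    }
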
@@ -9763,76 +9741,8 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_ArrayConcat) {
Handle<FixedArray> storage;
if (fast_case) {
- if (kind == FAST_DOUBLE_ELEMENTS) {
- Handle<FixedDoubleArray> double_storage =
- isolate->factory()->NewFixedDoubleArray(estimate_result_length);
- int j = 0;
- bool failure = false;
- for (int i = 0; i < argument_count; i++) {
- Handle<Object> obj(elements->get(i));
- if (obj->IsSmi()) {
- double_storage->set(j, Smi::cast(*obj)->value());
- j++;
- } else if (obj->IsNumber()) {
- double_storage->set(j, obj->Number());
- j++;
- } else {
- JSArray* array = JSArray::cast(*obj);
- uint32_t length = static_cast<uint32_t>(array->length()->Number());
- switch (array->map()->elements_kind()) {
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS: {
- // Empty fixed array indicates that there are no elements.
- if (array->elements()->IsFixedArray()) break;
- FixedDoubleArray* elements =
- FixedDoubleArray::cast(array->elements());
- for (uint32_t i = 0; i < length; i++) {
- if (elements->is_the_hole(i)) {
- failure = true;
- break;
- }
- double double_value = elements->get_scalar(i);
- double_storage->set(j, double_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_SMI_ELEMENTS: {
- FixedArray* elements(
- FixedArray::cast(array->elements()));
- for (uint32_t i = 0; i < length; i++) {
- Object* element = elements->get(i);
- if (element->IsTheHole()) {
- failure = true;
- break;
- }
- int32_t int_value = Smi::cast(element)->value();
- double_storage->set(j, int_value);
- j++;
- }
- break;
- }
- case FAST_HOLEY_ELEMENTS:
- ASSERT_EQ(0, length);
- break;
- default:
- UNREACHABLE();
- }
- }
- if (failure) break;
- }
- Handle<JSArray> array = isolate->factory()->NewJSArray(0);
- Smi* length = Smi::FromInt(j);
- Handle<Map> map;
- map = isolate->factory()->GetElementsTransitionMap(array, kind);
- array->set_map(*map);
- array->set_length(length);
- array->set_elements(*double_storage);
- return *array;
- }
- // The backing storage array must have non-existing elements to preserve
- // holes across concat operations.
+ // The backing storage array must have non-existing elements to
+ // preserve holes across concat operations.
storage = isolate->factory()->NewFixedArrayWithHoles(
estimate_result_length);
} else {
@@ -10046,8 +9956,8 @@ static MaybeObject* DebugLookupResultValue(Heap* heap,
return value;
case FIELD:
value =
- JSObject::cast(result->holder())->FastPropertyAt(
- result->GetFieldIndex().field_index());
+ JSObject::cast(
+ result->holder())->FastPropertyAt(result->GetFieldIndex());
if (value->IsTheHole()) {
return heap->undefined_value();
}
@@ -10686,8 +10596,7 @@ static bool CopyContextLocalsToScopeObject(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- scope_object,
+ SetProperty(scope_object,
Handle<String>(scope_info->ContextLocalName(i)),
Handle<Object>(context->get(context_index), isolate),
NONE,
@@ -10722,8 +10631,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- local_scope,
+ SetProperty(local_scope,
Handle<String>(scope_info->ParameterName(i)),
value,
NONE,
@@ -10735,8 +10643,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
for (int i = 0; i < scope_info->StackLocalCount(); ++i) {
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- local_scope,
+ SetProperty(local_scope,
Handle<String>(scope_info->StackLocalName(i)),
Handle<Object>(frame_inspector->GetExpression(i)),
NONE,
@@ -10770,8 +10677,7 @@ static Handle<JSObject> MaterializeLocalScopeWithFrameInspector(
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- local_scope,
+ SetProperty(local_scope,
key,
GetProperty(ext, key),
NONE,
@@ -10832,8 +10738,7 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
Handle<String> key(String::cast(keys->get(i)));
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- closure_scope,
+ SetProperty(closure_scope,
key,
GetProperty(ext, key),
NONE,
@@ -10846,52 +10751,6 @@ static Handle<JSObject> MaterializeClosure(Isolate* isolate,
}
-// This method copies structure of MaterializeClosure method above.
-static bool SetClosureVariableValue(Isolate* isolate,
- Handle<Context> context,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- ASSERT(context->IsFunctionContext());
-
- Handle<SharedFunctionInfo> shared(context->closure()->shared());
- Handle<ScopeInfo> scope_info(shared->scope_info());
-
- // Context locals to the context extension.
- for (int i = 0; i < scope_info->ContextLocalCount(); i++) {
- Handle<String> next_name(scope_info->ContextLocalName(i));
- if (variable_name->Equals(*next_name)) {
- VariableMode mode;
- InitializationFlag init_flag;
- int context_index =
- scope_info->ContextSlotIndex(*next_name, &mode, &init_flag);
- if (context_index < 0) {
- return false;
- }
- context->set(context_index, *new_value);
- return true;
- }
- }
-
- // Properties from the function context extension. This will
- // be variables introduced by eval.
- if (context->has_extension()) {
- Handle<JSObject> ext(JSObject::cast(context->extension()));
- if (ext->HasProperty(*variable_name)) {
- // We don't expect this to do anything except replacing property value.
- SetProperty(isolate,
- ext,
- variable_name,
- new_value,
- NONE,
- kNonStrictMode);
- return true;
- }
- }
-
- return false;
-}
-
-
// Create a plain JSObject which materializes the scope for the specified
// catch context.
static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
@@ -10903,12 +10762,7 @@ static Handle<JSObject> MaterializeCatchScope(Isolate* isolate,
isolate->factory()->NewJSObject(isolate->object_function());
RETURN_IF_EMPTY_HANDLE_VALUE(
isolate,
- SetProperty(isolate,
- catch_scope,
- name,
- thrown_object,
- NONE,
- kNonStrictMode),
+ SetProperty(catch_scope, name, thrown_object, NONE, kNonStrictMode),
Handle<JSObject>());
return catch_scope;
}
@@ -11172,33 +11026,6 @@ class ScopeIterator {
return Handle<JSObject>();
}
- bool SetVariableValue(Handle<String> variable_name,
- Handle<Object> new_value) {
- ASSERT(!failed_);
- switch (Type()) {
- case ScopeIterator::ScopeTypeGlobal:
- break;
- case ScopeIterator::ScopeTypeLocal:
- // TODO(2399): implement.
- break;
- case ScopeIterator::ScopeTypeWith:
- break;
- case ScopeIterator::ScopeTypeCatch:
- // TODO(2399): implement.
- break;
- case ScopeIterator::ScopeTypeClosure:
- return SetClosureVariableValue(isolate_, CurrentContext(),
- variable_name, new_value);
- case ScopeIterator::ScopeTypeBlock:
- // TODO(2399): should we implement it?
- break;
- case ScopeIterator::ScopeTypeModule:
- // TODO(2399): should we implement it?
- break;
- }
- return false;
- }
-
Handle<ScopeInfo> CurrentScopeInfo() {
ASSERT(!failed_);
if (!nested_scope_chain_.is_empty()) {
@@ -11438,64 +11265,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFunctionScopeDetails) {
}
-static bool SetScopeVariableValue(ScopeIterator* it, int index,
- Handle<String> variable_name,
- Handle<Object> new_value) {
- for (int n = 0; !it->Done() && n < index; it->Next()) {
- n++;
- }
- if (it->Done()) {
- return false;
- }
- return it->SetVariableValue(variable_name, new_value);
-}
-
-
-// Change variable value in closure or local scope
-// args[0]: number or JSFunction: break id or function
-// args[1]: number: frame index (when args[0] is break id)
-// args[2]: number: inlined frame index (when args[0] is break id)
-// args[3]: number: scope index
-// args[4]: string: variable name
-// args[5]: object: new value
-//
-// Returns true on success and false otherwise.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetScopeVariableValue) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 6);
-
- // Check arguments.
- CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
- CONVERT_ARG_HANDLE_CHECKED(String, variable_name, 4);
- Handle<Object> new_value = args.at<Object>(5);
-
- bool res;
- if (args[0]->IsNumber()) {
- Object* check;
- { MaybeObject* maybe_check = Runtime_CheckExecutionState(
- RUNTIME_ARGUMENTS(isolate, args));
- if (!maybe_check->ToObject(&check)) return maybe_check;
- }
- CONVERT_SMI_ARG_CHECKED(wrapped_id, 1);
- CONVERT_NUMBER_CHECKED(int, inlined_jsframe_index, Int32, args[2]);
-
- // Get the frame where the debugging is performed.
- StackFrame::Id id = UnwrapFrameId(wrapped_id);
- JavaScriptFrameIterator frame_it(isolate, id);
- JavaScriptFrame* frame = frame_it.frame();
-
- ScopeIterator it(isolate, frame, inlined_jsframe_index);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- } else {
- CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
- ScopeIterator it(isolate, fun);
- res = SetScopeVariableValue(&it, index, variable_name, new_value);
- }
-
- return isolate->heap()->ToBoolean(res);
-}
-
-
RUNTIME_FUNCTION(MaybeObject*, Runtime_DebugPrintScopes) {
HandleScope scope(isolate);
ASSERT(args.length() == 0);
@@ -13080,6 +12849,47 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetScript) {
}
+// Determines whether the given stack frame should be displayed in
+// a stack trace. The caller is the error constructor that asked
+// for the stack trace to be collected. The first time a construct
+// call to this function is encountered, it is skipped. The seen_caller
+// in/out parameter is used to remember if the caller has been seen
+// yet.
+static bool ShowFrameInStackTrace(StackFrame* raw_frame,
+ Object* caller,
+ bool* seen_caller) {
+ // Only display JS frames.
+ if (!raw_frame->is_java_script()) {
+ return false;
+ }
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ Object* raw_fun = frame->function();
+ // Not sure when this can happen, but skip it just in case.
+ if (!raw_fun->IsJSFunction()) {
+ return false;
+ }
+ if ((raw_fun == caller) && !(*seen_caller)) {
+ *seen_caller = true;
+ return false;
+ }
+ // Skip all frames until we've seen the caller.
+ if (!(*seen_caller)) return false;
+ // Also, skip non-visible built-in functions and any call with the builtins
+ // object as receiver, so as to not reveal either the builtins object or
+ // an internal function.
+ // The --builtins-in-stack-traces command line flag allows including
+ // internal call sites in the stack trace for debugging purposes.
+ if (!FLAG_builtins_in_stack_traces) {
+ JSFunction* fun = JSFunction::cast(raw_fun);
+ if (frame->receiver()->IsJSBuiltinsObject() ||
+ (fun->IsBuiltin() && !fun->shared()->native())) {
+ return false;
+ }
+ }
+ return true;
+}
+
+
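
The caller-skipping protocol implemented above can be exercised in isolation. A minimal standalone C++ sketch with mock frames (plain strings rather than V8's StackFrame types; the frame names are made up):

    #include <iostream>
    #include <string>
    #include <vector>

    // Same in/out seen_caller protocol as ShowFrameInStackTrace: suppress
    // every frame up to and including the first one matching the caller,
    // then show the rest.
    static bool ShowFrame(const std::string& fun, const std::string& caller,
                          bool* seen_caller) {
      if (fun == caller && !*seen_caller) {
        *seen_caller = true;  // The caller frame itself is suppressed too.
        return false;
      }
      return *seen_caller;
    }

    int main() {
      // Top of stack first, in the order a stack walk would visit them.
      std::vector<std::string> frames = {"captureStackTrace", "makeError",
                                         "doWork", "main"};
      bool seen_caller = false;  // As if the caller were a known function.
      for (const std::string& fun : frames) {
        if (ShowFrame(fun, "makeError", &seen_caller)) std::cout << fun << "\n";
      }
      // Prints doWork and main; the caller and everything above it is hidden.
    }
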
// Collect the raw data for a stack trace. Returns an array of
// 4-element segments, each containing a receiver, function, code and
// native code offset.
@@ -13090,23 +12900,57 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_CollectStackTrace) {
CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
HandleScope scope(isolate);
- // Optionally capture a more detailed stack trace for the message.
- isolate->CaptureAndSetDetailedStackTrace(error_object);
- // Capture a simple stack trace for the stack property.
- return *isolate->CaptureSimpleStackTrace(error_object, caller, limit);
-}
+ Factory* factory = isolate->factory();
+ limit = Max(limit, 0); // Ensure that limit is not negative.
+ int initial_size = Min(limit, 10);
+ Handle<FixedArray> elements =
+ factory->NewFixedArrayWithHoles(initial_size * 4);
-// Retrieve the raw stack trace collected on stack overflow and delete
-// it since it is used only once to avoid keeping it alive.
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetOverflowedRawStackTrace) {
- ASSERT_EQ(args.length(), 1);
- CONVERT_ARG_CHECKED(JSObject, error_object, 0);
- String* key = isolate->heap()->hidden_stack_trace_symbol();
- Object* result = error_object->GetHiddenProperty(key);
- RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
- error_object->DeleteHiddenProperty(key);
- return result;
+ StackFrameIterator iter(isolate);
+ // If the caller parameter is a function, we skip frames until we're
+ // under it before starting to collect.
+ bool seen_caller = !caller->IsJSFunction();
+ int cursor = 0;
+ int frames_seen = 0;
+ while (!iter.done() && frames_seen < limit) {
+ StackFrame* raw_frame = iter.frame();
+ if (ShowFrameInStackTrace(raw_frame, *caller, &seen_caller)) {
+ frames_seen++;
+ JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
+ // Set initial size to the maximum inlining level + 1 for the outermost
+ // function.
+ List<FrameSummary> frames(Compiler::kMaxInliningLevels + 1);
+ frame->Summarize(&frames);
+ for (int i = frames.length() - 1; i >= 0; i--) {
+ if (cursor + 4 > elements->length()) {
+ int new_capacity = JSObject::NewElementsCapacity(elements->length());
+ Handle<FixedArray> new_elements =
+ factory->NewFixedArrayWithHoles(new_capacity);
+ for (int i = 0; i < cursor; i++) {
+ new_elements->set(i, elements->get(i));
+ }
+ elements = new_elements;
+ }
+ ASSERT(cursor + 4 <= elements->length());
+
+ Handle<Object> recv = frames[i].receiver();
+ Handle<JSFunction> fun = frames[i].function();
+ Handle<Code> code = frames[i].code();
+ Handle<Smi> offset(Smi::FromInt(frames[i].offset()));
+ elements->set(cursor++, *recv);
+ elements->set(cursor++, *fun);
+ elements->set(cursor++, *code);
+ elements->set(cursor++, *offset);
+ }
+ }
+ iter.Advance();
+ }
+ Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
+ // Capture and attach a more detailed stack trace if necessary.
+ isolate->CaptureAndSetCurrentStackTraceFor(error_object);
+ result->set_length(Smi::FromInt(cursor));
+ return *result;
}
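
As the comments note, the collector above packs each frame into four consecutive array slots and grows the backing store geometrically. An illustrative sketch of that flat layout using plain containers (ints stand in for V8's tagged heap values):

    #include <cassert>
    #include <vector>

    struct Record { int recv, fun, code, offset; };  // Stand-ins for tagged values.

    int main() {
      std::vector<int> elements(8, 0);  // Initial capacity for two records.
      int cursor = 0;
      auto append = [&](const Record& r) {
        if (cursor + 4 > static_cast<int>(elements.size()))
          elements.resize(elements.size() * 2);  // Grow, keeping earlier slots.
        elements[cursor++] = r.recv;
        elements[cursor++] = r.fun;
        elements[cursor++] = r.code;
        elements[cursor++] = r.offset;
      };
      append(Record{1, 2, 3, 42});
      append(Record{4, 5, 6, 7});
      append(Record{8, 9, 10, 11});       // Triggers the resize.
      assert(cursor == 12);
      assert(elements[4 * 2 + 3] == 11);  // Record i's offset lives at slot 4*i + 3.
    }
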
@@ -13118,7 +12962,7 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetV8Version) {
const char* version_string = v8::V8::GetVersion();
- return isolate->heap()->AllocateStringFromOneByte(CStrVector(version_string),
+ return isolate->heap()->AllocateStringFromAscii(CStrVector(version_string),
NOT_TENURED);
}
@@ -13230,6 +13074,33 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_GetFromCache) {
}
+RUNTIME_FUNCTION(MaybeObject*, Runtime_NewMessageObject) {
+ HandleScope scope(isolate);
+ CONVERT_ARG_HANDLE_CHECKED(String, type, 0);
+ CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 1);
+ return *isolate->factory()->NewJSMessageObject(
+ type,
+ arguments,
+ 0,
+ 0,
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value(),
+ isolate->factory()->undefined_value());
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetType) {
+ CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+ return message->type();
+}
+
+
+RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetArguments) {
+ CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
+ return message->arguments();
+}
+
+
RUNTIME_FUNCTION(MaybeObject*, Runtime_MessageGetStartPosition) {
CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
return Smi::FromInt(message->start_position());
@@ -13342,88 +13213,6 @@ RUNTIME_FUNCTION(MaybeObject*, Runtime_HaveSameMap) {
return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
}
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_IsObserved) {
- ASSERT(args.length() == 1);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->false_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
- }
- return isolate->heap()->ToBoolean(obj->map()->is_observed());
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetIsObserved) {
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
- CONVERT_BOOLEAN_ARG_CHECKED(is_observed, 1);
- if (obj->IsJSGlobalProxy()) {
- Object* proto = obj->GetPrototype();
- if (proto->IsNull()) return isolate->heap()->undefined_value();
- ASSERT(proto->IsJSGlobalObject());
- obj = JSReceiver::cast(proto);
- }
- if (obj->map()->is_observed() != is_observed) {
- MaybeObject* maybe = obj->map()->Copy();
- Map* map;
- if (!maybe->To(&map)) return maybe;
- map->set_is_observed(is_observed);
- obj->set_map(map);
- }
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_SetObserverDeliveryPending) {
- ASSERT(args.length() == 0);
- isolate->set_observer_delivery_pending(true);
- return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_GetObservationState) {
- ASSERT(args.length() == 0);
- return isolate->heap()->observation_state();
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_CreateObjectHashTable) {
- ASSERT(args.length() == 0);
- return ObjectHashTable::Allocate(0);
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableGet) {
- NoHandleAllocation ha;
- ASSERT(args.length() == 2);
- CONVERT_ARG_CHECKED(ObjectHashTable, table, 0);
- Object* key = args[1];
- if (key->IsJSGlobalProxy()) {
- key = key->GetPrototype();
- if (key->IsNull()) return isolate->heap()->undefined_value();
- }
- Object* lookup = table->Lookup(key);
- return lookup->IsTheHole() ? isolate->heap()->undefined_value() : lookup;
-}
-
-
-RUNTIME_FUNCTION(MaybeObject*, Runtime_ObjectHashTableSet) {
- HandleScope scope(isolate);
- ASSERT(args.length() == 3);
- CONVERT_ARG_HANDLE_CHECKED(ObjectHashTable, table, 0);
- Handle<Object> key = args.at<Object>(1);
- if (key->IsJSGlobalProxy()) {
- key = handle(key->GetPrototype(), isolate);
- if (key->IsNull()) return *table;
- }
- Handle<Object> value = args.at<Object>(2);
- return *PutIntoObjectHashTable(table, key, value);
-}
-
-
// ----------------------------------------------------------------------------
// Implementation of Runtime
diff --git a/deps/v8/src/runtime.h b/deps/v8/src/runtime.h
index c77f98821..c9939d06c 100644
--- a/deps/v8/src/runtime.h
+++ b/deps/v8/src/runtime.h
@@ -85,9 +85,7 @@ namespace internal {
F(NewStrictArgumentsFast, 3, 1) \
F(LazyCompile, 1, 1) \
F(LazyRecompile, 1, 1) \
- F(ParallelRecompile, 1, 1) \
- F(ForceParallelRecompile, 1, 1) \
- F(InstallRecompiledCode, 1, 1) \
+ F(ParallelRecompile, 1, 1) \
F(NotifyDeoptimized, 1, 1) \
F(NotifyOSR, 0, 1) \
F(DeoptimizeFunction, 1, 1) \
@@ -113,6 +111,7 @@ namespace internal {
F(Typeof, 1, 1) \
\
F(StringToNumber, 1, 1) \
+ F(StringFromCharCodeArray, 1, 1) \
F(StringParseInt, 2, 1) \
F(StringParseFloat, 1, 1) \
F(StringToLowerCase, 1, 1) \
@@ -121,6 +120,9 @@ namespace internal {
F(CharFromCode, 1, 1) \
F(URIEscape, 1, 1) \
F(URIUnescape, 1, 1) \
+ F(QuoteJSONString, 1, 1) \
+ F(QuoteJSONStringComma, 1, 1) \
+ F(QuoteJSONStringArray, 1, 1) \
\
F(NumberToString, 1, 1) \
F(NumberToStringSkipCache, 1, 1) \
@@ -189,10 +191,6 @@ namespace internal {
\
/* JSON */ \
F(ParseJson, 1, 1) \
- F(BasicJSONStringify, 1, 1) \
- F(QuoteJSONString, 1, 1) \
- F(QuoteJSONStringComma, 1, 1) \
- F(QuoteJSONStringArray, 1, 1) \
\
/* Strings */ \
F(StringCharCodeAt, 2, 1) \
@@ -206,8 +204,6 @@ namespace internal {
F(StringTrim, 3, 1) \
F(StringToArray, 2, 1) \
F(NewStringWrapper, 1, 1) \
- F(NewString, 2, 1) \
- F(TruncateString, 2, 1) \
\
/* Numbers */ \
F(NumberToRadixString, 2, 1) \
@@ -236,7 +232,6 @@ namespace internal {
F(FunctionIsBuiltin, 1, 1) \
F(GetScript, 1, 1) \
F(CollectStackTrace, 3, 1) \
- F(GetOverflowedRawStackTrace, 1, 1) \
F(GetV8Version, 0, 1) \
\
F(ClassOf, 1, 1) \
@@ -271,7 +266,6 @@ namespace internal {
F(DefineOrRedefineDataProperty, 4, 1) \
F(DefineOrRedefineAccessorProperty, 5, 1) \
F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
- F(GetDataProperty, 2, 1) \
\
/* Arrays */ \
F(RemoveArrayHoles, 2, 1) \
@@ -307,7 +301,6 @@ namespace internal {
F(SetAdd, 2, 1) \
F(SetHas, 2, 1) \
F(SetDelete, 2, 1) \
- F(SetGetSize, 1, 1) \
\
/* Harmony maps */ \
F(MapInitialize, 1, 1) \
@@ -315,7 +308,6 @@ namespace internal {
F(MapHas, 2, 1) \
F(MapDelete, 2, 1) \
F(MapSet, 3, 1) \
- F(MapGetSize, 1, 1) \
\
/* Harmony weakmaps */ \
F(WeakMapInitialize, 1, 1) \
@@ -324,15 +316,6 @@ namespace internal {
F(WeakMapDelete, 2, 1) \
F(WeakMapSet, 3, 1) \
\
- /* Harmony observe */ \
- F(IsObserved, 1, 1) \
- F(SetIsObserved, 2, 1) \
- F(SetObserverDeliveryPending, 0, 1) \
- F(GetObservationState, 0, 1) \
- F(CreateObjectHashTable, 0, 1) \
- F(ObjectHashTableGet, 2, 1) \
- F(ObjectHashTableSet, 3, 1) \
- \
/* Statements */ \
F(NewClosure, 3, 1) \
F(NewObject, 1, 1) \
@@ -352,7 +335,7 @@ namespace internal {
F(PushWithContext, 2, 1) \
F(PushCatchContext, 3, 1) \
F(PushBlockContext, 2, 1) \
- F(PushModuleContext, 2, 1) \
+ F(PushModuleContext, 1, 1) \
F(DeleteContextSlot, 2, 1) \
F(LoadContextSlot, 2, 2) \
F(LoadContextSlotNoReferenceError, 2, 2) \
@@ -360,7 +343,6 @@ namespace internal {
\
/* Declarations and initialization */ \
F(DeclareGlobals, 3, 1) \
- F(DeclareModules, 1, 1) \
F(DeclareContextSlot, 4, 1) \
F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
F(InitializeConstGlobal, 2, 1) \
@@ -381,6 +363,9 @@ namespace internal {
F(GetFromCache, 2, 1) \
\
/* Message objects */ \
+ F(NewMessageObject, 2, 1) \
+ F(MessageGetType, 1, 1) \
+ F(MessageGetArguments, 1, 1) \
F(MessageGetStartPosition, 1, 1) \
F(MessageGetScript, 1, 1) \
\
@@ -433,7 +418,6 @@ namespace internal {
F(GetScopeDetails, 4, 1) \
F(GetFunctionScopeCount, 1, 1) \
F(GetFunctionScopeDetails, 2, 1) \
- F(SetScopeVariableValue, 6, 1) \
F(DebugPrintScopes, 0, 1) \
F(GetThreadCount, 1, 1) \
F(GetThreadDetails, 2, 1) \
@@ -530,8 +514,6 @@ namespace internal {
F(DateField, 2 /* date object, field index */, 1) \
F(StringCharFromCode, 1, 1) \
F(StringCharAt, 2, 1) \
- F(OneByteSeqStringSetChar, 3, 1) \
- F(TwoByteSeqStringSetChar, 3, 1) \
F(ObjectEquals, 2, 1) \
F(RandomHeapNumber, 0, 1) \
F(IsObject, 1, 1) \
diff --git a/deps/v8/src/scopeinfo.cc b/deps/v8/src/scopeinfo.cc
index c0b2c4c8e..02b432398 100644
--- a/deps/v8/src/scopeinfo.cc
+++ b/deps/v8/src/scopeinfo.cc
@@ -321,7 +321,6 @@ int ScopeInfo::ContextSlotIndex(String* name,
return result;
}
}
- // Cache as not found. Mode and init flag don't matter.
context_slot_cache->Update(this, name, INTERNAL, kNeedsInitialization, -1);
}
return -1;
@@ -505,32 +504,4 @@ void ScopeInfo::Print() {
}
#endif // DEBUG
-
-//---------------------------------------------------------------------------
-// ModuleInfo.
-
-Handle<ModuleInfo> ModuleInfo::Create(
- Isolate* isolate, Interface* interface, Scope* scope) {
- Handle<ModuleInfo> info = Allocate(isolate, interface->Length());
- info->set_host_index(interface->Index());
- int i = 0;
- for (Interface::Iterator it = interface->iterator();
- !it.done(); it.Advance(), ++i) {
- Variable* var = scope->LocalLookup(it.name());
- info->set_name(i, *it.name());
- info->set_mode(i, var->mode());
- ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
- if (var->mode() == MODULE) {
- ASSERT(it.interface()->IsFrozen());
- ASSERT(it.interface()->Index() >= 0);
- info->set_index(i, it.interface()->Index());
- } else {
- ASSERT(var->index() >= 0);
- info->set_index(i, var->index());
- }
- }
- ASSERT(i == info->length());
- return info;
-}
-
} } // namespace v8::internal
diff --git a/deps/v8/src/scopeinfo.h b/deps/v8/src/scopeinfo.h
index a884b3b9e..93734f5a1 100644
--- a/deps/v8/src/scopeinfo.h
+++ b/deps/v8/src/scopeinfo.h
@@ -114,9 +114,9 @@ class ContextSlotCache {
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
- class ModeField: public BitField<VariableMode, 0, 4> {};
- class InitField: public BitField<InitializationFlag, 4, 1> {};
- class IndexField: public BitField<int, 5, 32-5> {};
+ class ModeField: public BitField<VariableMode, 0, 3> {};
+ class InitField: public BitField<InitializationFlag, 3, 1> {};
+ class IndexField: public BitField<int, 4, 32-4> {};
private:
uint32_t value_;
@@ -130,67 +130,6 @@ class ContextSlotCache {
};
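
The hunk above narrows ModeField from four bits to three and shifts the neighbouring fields down, so a packed cache entry still fits in one uint32_t. A self-contained sketch of this style of bit packing (simplified relative to V8's BitField template):

    #include <cassert>
    #include <cstdint>

    template <typename T, int shift, int size>
    struct BitField {
      static const uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t value) {
        return static_cast<T>((value & kMask) >> shift);
      }
    };

    typedef BitField<int, 0, 3>  ModeField;   // Three bits: eight VariableModes.
    typedef BitField<int, 3, 1>  InitField;   // One bit: the InitializationFlag.
    typedef BitField<int, 4, 28> IndexField;  // Remaining 28 bits: slot index.

    int main() {
      uint32_t packed = ModeField::encode(5) | InitField::encode(1) |
                        IndexField::encode(1234);
      assert(ModeField::decode(packed) == 5);
      assert(InitField::decode(packed) == 1);
      assert(IndexField::decode(packed) == 1234);
    }
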
-
-
-//---------------------------------------------------------------------------
-// Auxiliary class used for the description of module instances.
-// Used by Runtime_DeclareModules.
-
-class ModuleInfo: public FixedArray {
- public:
- static ModuleInfo* cast(Object* description) {
- return static_cast<ModuleInfo*>(FixedArray::cast(description));
- }
-
- static Handle<ModuleInfo> Create(
- Isolate* isolate, Interface* interface, Scope* scope);
-
- // Index of module's context in host context.
- int host_index() { return Smi::cast(get(HOST_OFFSET))->value(); }
-
- // Name, mode, and index of the i-th export, respectively.
- // For value exports, the index is the slot of the value in the module
- // context; for exported modules it is the slot index of the
- // referred module's context in the host context.
- // TODO(rossberg): This format cannot yet handle exports of modules declared
- // in earlier scripts.
- String* name(int i) { return String::cast(get(name_offset(i))); }
- VariableMode mode(int i) {
- return static_cast<VariableMode>(Smi::cast(get(mode_offset(i)))->value());
- }
- int index(int i) { return Smi::cast(get(index_offset(i)))->value(); }
-
- int length() { return (FixedArray::length() - HEADER_SIZE) / ITEM_SIZE; }
-
- private:
- // The internal format is: Index, (Name, VariableMode, Index)*
- enum {
- HOST_OFFSET,
- NAME_OFFSET,
- MODE_OFFSET,
- INDEX_OFFSET,
- HEADER_SIZE = NAME_OFFSET,
- ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1
- };
- inline int name_offset(int i) { return NAME_OFFSET + i * ITEM_SIZE; }
- inline int mode_offset(int i) { return MODE_OFFSET + i * ITEM_SIZE; }
- inline int index_offset(int i) { return INDEX_OFFSET + i * ITEM_SIZE; }
-
- static Handle<ModuleInfo> Allocate(Isolate* isolate, int length) {
- return Handle<ModuleInfo>::cast(
- isolate->factory()->NewFixedArray(HEADER_SIZE + ITEM_SIZE * length));
- }
- void set_host_index(int index) { set(HOST_OFFSET, Smi::FromInt(index)); }
- void set_name(int i, String* name) { set(name_offset(i), name); }
- void set_mode(int i, VariableMode mode) {
- set(mode_offset(i), Smi::FromInt(mode));
- }
- void set_index(int i, int index) {
- set(index_offset(i), Smi::FromInt(index));
- }
-};
-
-
} } // namespace v8::internal
#endif // V8_SCOPEINFO_H_
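
For reference, the removed ModuleInfo layout is one header slot (the host index) followed by (name, mode, index) triples, so HEADER_SIZE is 1 and ITEM_SIZE is 3. The offset arithmetic from the enum works out as follows:

    #include <cassert>

    enum { HOST_OFFSET, NAME_OFFSET, MODE_OFFSET, INDEX_OFFSET,
           HEADER_SIZE = NAME_OFFSET,
           ITEM_SIZE = INDEX_OFFSET - NAME_OFFSET + 1 };

    int main() {
      // Two exports need a backing array of 1 + 2 * 3 == 7 slots.
      assert(HEADER_SIZE + 2 * ITEM_SIZE == 7);
      // The second export (i == 1) lands in slots 4, 5 and 6.
      const int i = 1;
      assert(NAME_OFFSET  + i * ITEM_SIZE == 4);
      assert(MODE_OFFSET  + i * ITEM_SIZE == 5);
      assert(INDEX_OFFSET + i * ITEM_SIZE == 6);
    }
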
diff --git a/deps/v8/src/scopes.cc b/deps/v8/src/scopes.cc
index 56a922d25..c9612577a 100644
--- a/deps/v8/src/scopes.cc
+++ b/deps/v8/src/scopes.cc
@@ -108,7 +108,6 @@ Scope::Scope(Scope* outer_scope, ScopeType type, Zone* zone)
: isolate_(Isolate::Current()),
inner_scopes_(4, zone),
variables_(zone),
- internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -132,7 +131,6 @@ Scope::Scope(Scope* inner_scope,
: isolate_(Isolate::Current()),
inner_scopes_(4, zone),
variables_(zone),
- internals_(4, zone),
temps_(4, zone),
params_(4, zone),
unresolved_(16, zone),
@@ -155,7 +153,6 @@ Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
: isolate_(Isolate::Current()),
inner_scopes_(1, zone),
variables_(zone),
- internals_(0, zone),
temps_(0, zone),
params_(0, zone),
unresolved_(0, zone),
@@ -200,8 +197,6 @@ void Scope::SetDefaults(ScopeType type,
num_var_or_const_ = 0;
num_stack_slots_ = 0;
num_heap_slots_ = 0;
- num_modules_ = 0;
- module_var_ = NULL;
scope_info_ = scope_info;
start_position_ = RelocInfo::kNoPosition;
end_position_ = RelocInfo::kNoPosition;
@@ -380,7 +375,6 @@ void Scope::Initialize() {
Scope* Scope::FinalizeBlockScope() {
ASSERT(is_block_scope());
- ASSERT(internals_.is_empty());
ASSERT(temps_.is_empty());
ASSERT(params_.is_empty());
@@ -521,19 +515,6 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
}
-Variable* Scope::NewInternal(Handle<String> name) {
- ASSERT(!already_resolved());
- Variable* var = new(zone()) Variable(this,
- name,
- INTERNAL,
- false,
- Variable::NORMAL,
- kCreatedInitialized);
- internals_.Add(var, zone());
- return var;
-}
-
-
Variable* Scope::NewTemporary(Handle<String> name) {
ASSERT(!already_resolved());
Variable* var = new(zone()) Variable(this,
@@ -634,15 +615,6 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
ASSERT(stack_locals != NULL);
ASSERT(context_locals != NULL);
- // Collect internals which are always allocated on the heap.
- for (int i = 0; i < internals_.length(); i++) {
- Variable* var = internals_[i];
- if (var->is_used()) {
- ASSERT(var->IsContextSlot());
- context_locals->Add(var, zone());
- }
- }
-
// Collect temporaries which are always allocated on the stack.
for (int i = 0; i < temps_.length(); i++) {
Variable* var = temps_[i];
@@ -652,8 +624,9 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
}
}
- // Collect declared local variables.
ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
+ // Collect declared local variables.
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
@@ -686,18 +659,18 @@ bool Scope::AllocateVariables(CompilationInfo* info,
}
PropagateScopeInfo(outer_scope_calls_non_strict_eval);
- // 2) Allocate module instances.
- if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
- ASSERT(num_modules_ == 0);
- AllocateModulesRecursively(this);
- }
-
- // 3) Resolve variables.
+ // 2) Resolve variables.
if (!ResolveVariablesRecursively(info, factory)) return false;
- // 4) Allocate variables.
+ // 3) Allocate variables.
AllocateVariablesRecursively();
+ // 4) Allocate and link module instance objects.
+ if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+ AllocateModules(info);
+ LinkModules(info);
+ }
+
return true;
}
@@ -729,12 +702,17 @@ bool Scope::HasTrivialOuterContext() const {
bool Scope::HasLazyCompilableOuterContext() const {
Scope* outer = outer_scope_;
if (outer == NULL) return true;
- // We have to prevent lazy compilation if this scope is inside a with scope
- // and all declaration scopes between them have empty contexts. Such
- // declaration scopes may become invisible during scope info deserialization.
+ // There are several reasons that prevent lazy compilation:
+ // - This scope is inside a with scope and all declaration scopes between
+ // them have empty contexts. Such declaration scopes become invisible
+ // during scope info deserialization.
+ // - This scope is inside a strict eval scope with variables that are
+ // potentially context allocated in an artificial function scope that
+ // is not deserialized correctly.
outer = outer->DeclarationScope();
bool found_non_trivial_declarations = false;
for (const Scope* scope = outer; scope != NULL; scope = scope->outer_scope_) {
+ if (scope->is_eval_scope()) return false;
if (scope->is_with_scope() && !found_non_trivial_declarations) return false;
if (scope->is_declaration_scope() && scope->num_heap_slots() > 0) {
found_non_trivial_declarations = true;
@@ -764,15 +742,6 @@ int Scope::ContextChainLength(Scope* scope) {
}
-Scope* Scope::GlobalScope() {
- Scope* scope = this;
- while (!scope->is_global_scope()) {
- scope = scope->outer_scope();
- }
- return scope;
-}
-
-
Scope* Scope::DeclarationScope() {
Scope* scope = this;
while (!scope->is_declaration_scope()) {
@@ -946,11 +915,6 @@ void Scope::Print(int n) {
PrintVar(n1, temps_[i]);
}
- Indent(n1, "// internal vars\n");
- for (int i = 0; i < internals_.length(); i++) {
- PrintVar(n1, internals_[i]);
- }
-
Indent(n1, "// local vars\n");
PrintMap(n1, &variables_);
@@ -1101,6 +1065,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
ASSERT(var != NULL);
+ proxy->BindTo(var);
if (FLAG_harmony_modules) {
bool ok;
@@ -1136,8 +1101,6 @@ bool Scope::ResolveVariable(CompilationInfo* info,
}
}
- proxy->BindTo(var);
-
return true;
}
@@ -1212,7 +1175,6 @@ bool Scope::MustAllocateInContext(Variable* var) {
// Exceptions: temporary variables are never allocated in a context;
// catch-bound variables are always allocated in a context.
if (var->mode() == TEMPORARY) return false;
- if (var->mode() == INTERNAL) return true;
if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
return var->has_forced_context_allocation() ||
@@ -1319,17 +1281,15 @@ void Scope::AllocateNonParameterLocals() {
AllocateNonParameterLocal(temps_[i]);
}
- for (int i = 0; i < internals_.length(); i++) {
- AllocateNonParameterLocal(internals_[i]);
- }
-
ZoneList<VarAndOrder> vars(variables_.occupancy(), zone());
+
for (VariableMap::Entry* p = variables_.Start();
p != NULL;
p = variables_.Next(p)) {
Variable* var = reinterpret_cast<Variable*>(p->value);
vars.Add(VarAndOrder(var, p->order), zone());
}
+
vars.Sort(VarAndOrder::Compare);
int var_count = vars.length();
for (int i = 0; i < var_count; i++) {
@@ -1382,35 +1342,89 @@ void Scope::AllocateVariablesRecursively() {
}
-void Scope::AllocateModulesRecursively(Scope* host_scope) {
- if (already_resolved()) return;
+int Scope::StackLocalCount() const {
+ return num_stack_slots() -
+ (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
+}
+
+
+int Scope::ContextLocalCount() const {
+ if (num_heap_slots() == 0) return 0;
+ return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
+ (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
+}
+
+
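
A worked example of the ContextLocalCount() arithmetic restored above, assuming Context::MIN_CONTEXT_SLOTS is 4 (the fixed context header slots; treat that constant as an assumption of this sketch):

    #include <cassert>

    // Assumption for this sketch: Context::MIN_CONTEXT_SLOTS == 4.
    const int kMinContextSlots = 4;

    int ContextLocalCount(int num_heap_slots, bool function_name_in_context) {
      if (num_heap_slots == 0) return 0;
      return num_heap_slots - kMinContextSlots -
             (function_name_in_context ? 1 : 0);
    }

    int main() {
      assert(ContextLocalCount(0, false) == 0);  // No context allocated at all.
      assert(ContextLocalCount(6, false) == 2);  // 6 - 4 header slots.
      assert(ContextLocalCount(7, true)  == 2);  // 7 - 4 - 1 for the function name.
    }
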
+void Scope::AllocateModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+
if (is_module_scope()) {
ASSERT(interface_->IsFrozen());
- const char raw_name[] = ".module";
- Handle<String> name = isolate_->factory()->LookupSymbol(
- Vector<const char>(raw_name, StrLength(raw_name)));
- ASSERT(module_var_ == NULL);
- module_var_ = host_scope->NewInternal(name);
- ++host_scope->num_modules_;
+ ASSERT(scope_info_.is_null());
+
+ // TODO(rossberg): This has to be the initial compilation of this code.
+ // We currently do not allow recompiling any module definitions.
+ Handle<ScopeInfo> scope_info = GetScopeInfo();
+ Factory* factory = info->isolate()->factory();
+ Handle<Context> context = factory->NewModuleContext(scope_info);
+ Handle<JSModule> instance = factory->NewJSModule(context, scope_info);
+ context->set_module(*instance);
+
+ bool ok;
+ interface_->MakeSingleton(instance, &ok);
+ ASSERT(ok);
}
+ // Allocate nested modules.
for (int i = 0; i < inner_scopes_.length(); i++) {
Scope* inner_scope = inner_scopes_.at(i);
- inner_scope->AllocateModulesRecursively(host_scope);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->AllocateModules(info);
+ }
}
}
-int Scope::StackLocalCount() const {
- return num_stack_slots() -
- (function_ != NULL && function_->proxy()->var()->IsStackLocal() ? 1 : 0);
-}
+void Scope::LinkModules(CompilationInfo* info) {
+ ASSERT(is_global_scope() || is_module_scope());
+ if (is_module_scope()) {
+ Handle<JSModule> instance = interface_->Instance();
+
+ // Populate the module instance object.
+ const PropertyAttributes ro_attr =
+ static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE | DONT_ENUM);
+ const PropertyAttributes rw_attr =
+ static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM);
+ for (Interface::Iterator it = interface_->iterator();
+ !it.done(); it.Advance()) {
+ if (it.interface()->IsModule()) {
+ Handle<Object> value = it.interface()->Instance();
+ ASSERT(!value.is_null());
+ JSReceiver::SetProperty(
+ instance, it.name(), value, ro_attr, kStrictMode);
+ } else {
+ Variable* var = LocalLookup(it.name());
+ ASSERT(var != NULL && var->IsContextSlot());
+ PropertyAttributes attr = var->is_const_mode() ? ro_attr : rw_attr;
+ Handle<AccessorInfo> info =
+ Accessors::MakeModuleExport(it.name(), var->index(), attr);
+ Handle<Object> result = SetAccessor(instance, info);
+ ASSERT(!(result.is_null() || result->IsUndefined()));
+ USE(result);
+ }
+ }
+ USE(JSObject::PreventExtensions(instance));
+ }
-int Scope::ContextLocalCount() const {
- if (num_heap_slots() == 0) return 0;
- return num_heap_slots() - Context::MIN_CONTEXT_SLOTS -
- (function_ != NULL && function_->proxy()->var()->IsContextSlot() ? 1 : 0);
+ // Link nested modules.
+ for (int i = 0; i < inner_scopes_.length(); i++) {
+ Scope* inner_scope = inner_scopes_.at(i);
+ if (inner_scope->is_module_scope()) {
+ inner_scope->LinkModules(info);
+ }
+ }
}
+
} } // namespace v8::internal
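
The allocate-then-link split implemented above exists because module instances may refer to each other cyclically: every instance object must exist before any of them can be populated. A toy illustration in plain C++ (not V8 objects):

    #include <cassert>
    #include <map>
    #include <string>

    struct Instance { std::map<std::string, Instance*> exports; };

    int main() {
      // Phase 1: allocate every instance object up front.
      Instance a, b;
      // Phase 2: link; each step may now refer forward to any other instance.
      a.exports["b"] = &b;
      b.exports["a"] = &a;
      assert(a.exports["b"]->exports["a"] == &a);  // The cycle is intact.
    }
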
diff --git a/deps/v8/src/scopes.h b/deps/v8/src/scopes.h
index c60c2e7d4..b9d151cba 100644
--- a/deps/v8/src/scopes.h
+++ b/deps/v8/src/scopes.h
@@ -186,12 +186,6 @@ class Scope: public ZoneObject {
// such a variable again if it was added; otherwise this is a no-op.
void RemoveUnresolved(VariableProxy* var);
- // Creates a new internal variable in this scope. The name is only used
- // for printing and cannot be used to find the variable. In particular,
- // the only way to get hold of the variable is by keeping the Variable*
- // around.
- Variable* NewInternal(Handle<String> name);
-
// Creates a new temporary variable in this scope. The name is only used
// for printing and cannot be used to find the variable. In particular,
// the only way to get hold of the temporary is by keeping the Variable*
@@ -375,12 +369,6 @@ class Scope: public ZoneObject {
int StackLocalCount() const;
int ContextLocalCount() const;
- // For global scopes, the number of module literals (including nested ones).
- int num_modules() const { return num_modules_; }
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var() const { return module_var_; }
-
// Make sure this scope and all outer scopes are eagerly compiled.
void ForceEagerCompilation() { force_eager_compilation_ = true; }
@@ -399,9 +387,6 @@ class Scope: public ZoneObject {
// The number of contexts between this and scope; zero if this == scope.
int ContextChainLength(Scope* scope);
- // Find the enclosing global scope.
- Scope* GlobalScope();
-
// Find the first function, global, or eval scope. This is the scope
// where var declarations will be hoisted to in the implementation.
Scope* DeclarationScope();
@@ -456,8 +441,6 @@ class Scope: public ZoneObject {
// variables may be implicitly 'declared' by being used (possibly in
// an inner scope) with no intervening with statements or eval calls.
VariableMap variables_;
- // Compiler-allocated (user-invisible) internals.
- ZoneList<Variable*> internals_;
// Compiler-allocated (user-invisible) temporaries.
ZoneList<Variable*> temps_;
// Parameter list in source order.
@@ -511,12 +494,6 @@ class Scope: public ZoneObject {
int num_stack_slots_;
int num_heap_slots_;
- // The number of modules (including nested ones).
- int num_modules_;
-
- // For module scopes, the host scope's internal variable binding this module.
- Variable* module_var_;
-
// Serialized scope info support.
Handle<ScopeInfo> scope_info_;
bool already_resolved() { return already_resolved_; }
@@ -601,7 +578,6 @@ class Scope: public ZoneObject {
void AllocateNonParameterLocal(Variable* var);
void AllocateNonParameterLocals();
void AllocateVariablesRecursively();
- void AllocateModulesRecursively(Scope* host_scope);
// Resolve and fill in the allocation information for all variables
// in this scope. Must be called *after* all scopes have been
@@ -615,6 +591,13 @@ class Scope: public ZoneObject {
bool AllocateVariables(CompilationInfo* info,
AstNodeFactory<AstNullVisitor>* factory);
+ // Instance objects have to be created ahead of time (before code generation)
+ // because of potentially cyclic references between them.
+ // Linking also has to be a separate stage, since populating one object may
+ // potentially require (forward) references to others.
+ void AllocateModules(CompilationInfo* info);
+ void LinkModules(CompilationInfo* info);
+
private:
// Construct a scope based on the scope info.
Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
diff --git a/deps/v8/src/serialize.cc b/deps/v8/src/serialize.cc
index dfc55740a..2ea09f89c 100644
--- a/deps/v8/src/serialize.cc
+++ b/deps/v8/src/serialize.cc
@@ -523,10 +523,6 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
UNCLASSIFIED,
50,
"pending_message_script");
- Add(ExternalReference::get_make_code_young_function(isolate).address(),
- UNCLASSIFIED,
- 51,
- "Code::MakeCodeYoung");
}
@@ -1301,7 +1297,7 @@ void PartialSerializer::SerializeObject(
// The code-caches link to context-specific code objects, which
// the startup and context serializers cannot currently handle.
ASSERT(Map::cast(heap_object)->code_cache() ==
- heap_object->GetHeap()->empty_fixed_array());
+ heap_object->GetHeap()->raw_unchecked_empty_fixed_array());
}
int root_index;
diff --git a/deps/v8/src/spaces-inl.h b/deps/v8/src/spaces-inl.h
index 8a576a83f..c64772775 100644
--- a/deps/v8/src/spaces-inl.h
+++ b/deps/v8/src/spaces-inl.h
@@ -214,19 +214,6 @@ MemoryChunk* MemoryChunk::FromAnyPointerAddress(Address addr) {
}
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
- if (mark == NULL) return;
- // Need to subtract one from the mark because when a chunk is full, the
- // top points to the next address after the chunk, which effectively belongs
- // to another chunk. See the comment to Page::FromAllocationTop.
- MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
- int new_mark = static_cast<int>(mark - chunk->address());
- if (new_mark > chunk->high_water_mark_) {
- chunk->high_water_mark_ = new_mark;
- }
-}
-
-
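
The mark-minus-one trick in the removed UpdateHighWaterMark can be verified with plain address arithmetic. A sketch assuming power-of-two aligned chunks (the 1 MB size is hypothetical):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kChunkSize = 1 << 20;  // Hypothetical 1 MB aligned chunks.

    uintptr_t ChunkFromAddress(uintptr_t addr) {
      return addr & ~(kChunkSize - 1);
    }

    int main() {
      uintptr_t chunk = 16 * kChunkSize;   // Some aligned chunk base.
      uintptr_t top = chunk + kChunkSize;  // Allocation filled the chunk exactly.
      assert(ChunkFromAddress(top) != chunk);      // 'top' is in the next chunk...
      assert(ChunkFromAddress(top - 1) == chunk);  // ...hence mark - 1.
      int new_mark = static_cast<int>(top - ChunkFromAddress(top - 1));
      assert(new_mark == static_cast<int>(kChunkSize));  // Whole chunk marked.
    }
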
PointerChunkIterator::PointerChunkIterator(Heap* heap)
: state_(kOldPointerState),
old_pointer_iterator_(heap->old_pointer_space()),
diff --git a/deps/v8/src/spaces.cc b/deps/v8/src/spaces.cc
index cacd96915..cc841806b 100644
--- a/deps/v8/src/spaces.cc
+++ b/deps/v8/src/spaces.cc
@@ -448,8 +448,6 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap,
chunk->slots_buffer_ = NULL;
chunk->skip_list_ = NULL;
chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
- chunk->progress_bar_ = 0;
- chunk->high_water_mark_ = static_cast<int>(area_start - base);
chunk->ResetLiveBytes();
Bitmap::Clear(chunk);
chunk->initialize_scan_on_scavenge(false);
@@ -826,18 +824,6 @@ void PagedSpace::TearDown() {
}
-size_t PagedSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- size_t size = 0;
- PageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
- }
- return size;
-}
-
-
MaybeObject* PagedSpace::FindObject(Address addr) {
// Note: this function can only be called on precisely swept spaces.
ASSERT(!heap()->mark_compact_collector()->in_use());
@@ -1189,7 +1175,6 @@ void NewSpace::Shrink() {
void NewSpace::UpdateAllocationInfo() {
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
allocation_info_.top = to_space_.page_low();
allocation_info_.limit = to_space_.page_high();
@@ -1402,17 +1387,6 @@ bool SemiSpace::Uncommit() {
}
-size_t SemiSpace::CommittedPhysicalMemory() {
- if (!is_committed()) return 0;
- size_t size = 0;
- NewSpacePageIterator it(this);
- while (it.has_next()) {
- size += it.next()->CommittedPhysicalMemory();
- }
- return size;
-}
-
-
bool SemiSpace::GrowTo(int new_capacity) {
if (!is_committed()) {
if (!Commit()) return false;
@@ -1847,17 +1821,6 @@ void NewSpace::RecordPromotion(HeapObject* obj) {
promoted_histogram_[type].increment_bytes(obj->Size());
}
-
-size_t NewSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
- size_t size = to_space_.CommittedPhysicalMemory();
- if (from_space_.is_committed()) {
- size += from_space_.CommittedPhysicalMemory();
- }
- return size;
-}
-
// -----------------------------------------------------------------------------
// Free lists for old object spaces implementation
@@ -2391,13 +2354,10 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
// Allocation in this space has failed.
- // If there are unswept pages advance lazy sweeper a bounded number of times
- // until we find a size_in_bytes contiguous piece of memory
- const int kMaxSweepingTries = 5;
- bool sweeping_complete = false;
-
- for (int i = 0; i < kMaxSweepingTries && !sweeping_complete; i++) {
- sweeping_complete = AdvanceSweeper(size_in_bytes);
+ // If there are unswept pages, advance the lazy sweeper, then sweep one page
+ // before allocating a new page.
+ if (first_unswept_page_->is_valid()) {
+ AdvanceSweeper(size_in_bytes);
// Retry the free list allocation.
HeapObject* object = free_list_.Allocate(size_in_bytes);
@@ -2738,18 +2698,6 @@ MaybeObject* LargeObjectSpace::AllocateRaw(int object_size,
}
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
- if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
- size_t size = 0;
- LargePage* current = first_page_;
- while (current != NULL) {
- size += current->CommittedPhysicalMemory();
- current = current->next_page();
- }
- return size;
-}
-
-
// GC support
MaybeObject* LargeObjectSpace::FindObject(Address a) {
LargePage* page = FindPage(a);
@@ -2788,8 +2736,7 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
MarkBit mark_bit = Marking::MarkBitFrom(object);
if (mark_bit.Get()) {
mark_bit.Clear();
- Page::FromAddress(object->address())->ResetProgressBar();
- Page::FromAddress(object->address())->ResetLiveBytes();
+ MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
previous = current;
current = current->next_page();
} else {
diff --git a/deps/v8/src/spaces.h b/deps/v8/src/spaces.h
index 56f629e02..95c63d6b6 100644
--- a/deps/v8/src/spaces.h
+++ b/deps/v8/src/spaces.h
@@ -397,12 +397,6 @@ class MemoryChunk {
WAS_SWEPT_PRECISELY,
WAS_SWEPT_CONSERVATIVELY,
- // Large objects can have a progress bar in their page header. These objects
- // are scanned in increments and will be kept black while being scanned.
- // Even if the mutator writes to them, they will be kept black and a white
- // to grey transition is performed in the value.
- HAS_PROGRESS_BAR,
-
// Last flag, keep at bottom.
NUM_MEMORY_CHUNK_FLAGS
};
@@ -486,29 +480,6 @@ class MemoryChunk {
write_barrier_counter_ = counter;
}
- int progress_bar() {
- ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
- return progress_bar_;
- }
-
- void set_progress_bar(int progress_bar) {
- ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
- progress_bar_ = progress_bar;
- }
-
- void ResetProgressBar() {
- if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
- set_progress_bar(0);
- ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
- }
- }
-
- bool IsLeftOfProgressBar(Object** slot) {
- Address slot_address = reinterpret_cast<Address>(slot);
- ASSERT(slot_address > this->address());
- return (slot_address - (this->address() + kObjectStartOffset)) <
- progress_bar();
- }
static void IncrementLiveBytesFromGC(Address address, int by) {
MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
@@ -533,8 +504,7 @@ class MemoryChunk {
static const size_t kWriteBarrierCounterOffset =
kSlotsBufferOffset + kPointerSize + kPointerSize;
- static const size_t kHeaderSize =
- kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize;
+ static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize;
static const int kBodyOffset =
CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
@@ -646,13 +616,6 @@ class MemoryChunk {
return static_cast<int>(area_end() - area_start());
}
- // Approximate amount of physical memory committed for this chunk.
- size_t CommittedPhysicalMemory() {
- return high_water_mark_;
- }
-
- static inline void UpdateHighWaterMark(Address mark);
-
protected:
MemoryChunk* next_chunk_;
MemoryChunk* prev_chunk_;
@@ -678,12 +641,6 @@ class MemoryChunk {
SlotsBuffer* slots_buffer_;
SkipList* skip_list_;
intptr_t write_barrier_counter_;
- // Used by the incremental marker to keep track of the scanning progress in
- // large objects that have a progress bar and are scanned in increments.
- int progress_bar_;
- // Assuming the initial allocation on a page is sequential,
- // count the highest number of bytes ever allocated on the page.
- int high_water_mark_;
static MemoryChunk* Initialize(Heap* heap,
Address base,
@@ -1533,9 +1490,6 @@ class PagedSpace : public Space {
// spaces this equals the capacity.
intptr_t CommittedMemory() { return Capacity(); }
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
// Sets the capacity, the available space and the wasted space to zero.
// The stats are rebuilt during sweeping by adding each page to the
// capacity and the size when it is encountered. As free spaces are
@@ -1596,7 +1550,6 @@ class PagedSpace : public Space {
void SetTop(Address top, Address limit) {
ASSERT(top == limit ||
Page::FromAddress(top) == Page::FromAddress(limit - 1));
- MemoryChunk::UpdateHighWaterMark(allocation_info_.top);
allocation_info_.top = top;
allocation_info_.limit = limit;
}
@@ -2008,9 +1961,6 @@ class SemiSpace : public Space {
static void Swap(SemiSpace* from, SemiSpace* to);
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
private:
// Flips the semispace between being from-space and to-space.
// Copies the flags into the masked positions on all pages in the space.
@@ -2208,9 +2158,6 @@ class NewSpace : public Space {
return Capacity();
}
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
// Return the available bytes without growing.
intptr_t Available() {
return Capacity() - Size();
@@ -2440,9 +2387,11 @@ class FixedSpace : public PagedSpace {
FixedSpace(Heap* heap,
intptr_t max_capacity,
AllocationSpace id,
- int object_size_in_bytes)
+ int object_size_in_bytes,
+ const char* name)
: PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
- object_size_in_bytes_(object_size_in_bytes) {
+ object_size_in_bytes_(object_size_in_bytes),
+ name_(name) {
page_extra_ = Page::kNonCodeObjectAreaSize % object_size_in_bytes;
}
@@ -2459,6 +2408,9 @@ class FixedSpace : public PagedSpace {
private:
// The size of objects in this space.
int object_size_in_bytes_;
+
+ // The name of this space.
+ const char* name_;
};
@@ -2469,7 +2421,7 @@ class MapSpace : public FixedSpace {
public:
// Creates a map space object with a maximum capacity.
MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, Map::kSize),
+ : FixedSpace(heap, max_capacity, id, Map::kSize, "map"),
max_map_space_pages_(kMaxMapPageIndex - 1) {
}
@@ -2510,7 +2462,7 @@ class CellSpace : public FixedSpace {
public:
// Creates a property cell space object with a maximum capacity.
CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
- : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize)
+ : FixedSpace(heap, max_capacity, id, JSGlobalPropertyCell::kSize, "cell")
{}
virtual int RoundSizeDownToObjectAlignment(int size) {
@@ -2572,9 +2524,6 @@ class LargeObjectSpace : public Space {
return Size();
}
- // Approximate amount of physical memory committed for this space.
- size_t CommittedPhysicalMemory();
-
int PageCount() {
return page_count_;
}
diff --git a/deps/v8/src/store-buffer.h b/deps/v8/src/store-buffer.h
index 79046d154..0ade8cee1 100644
--- a/deps/v8/src/store-buffer.h
+++ b/deps/v8/src/store-buffer.h
@@ -210,7 +210,8 @@ class StoreBufferRebuildScope {
explicit StoreBufferRebuildScope(Heap* heap,
StoreBuffer* store_buffer,
StoreBufferCallback callback)
- : store_buffer_(store_buffer),
+ : heap_(heap),
+ store_buffer_(store_buffer),
stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
stored_callback_(store_buffer->callback_) {
store_buffer_->store_buffer_rebuilding_enabled_ = true;
@@ -225,6 +226,7 @@ class StoreBufferRebuildScope {
}
private:
+ Heap* heap_;
StoreBuffer* store_buffer_;
bool stored_state_;
StoreBufferCallback stored_callback_;
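
StoreBufferRebuildScope follows the usual save-in-constructor, restore-in-destructor shape, now also carrying the heap pointer. A generic sketch of that RAII pattern (not the V8 class):

    #include <cassert>

    template <typename T>
    class ScopedOverride {
     public:
      ScopedOverride(T* slot, T value) : slot_(slot), saved_(*slot) {
        *slot_ = value;  // Install the temporary value.
      }
      ~ScopedOverride() { *slot_ = saved_; }  // Restore on scope exit.
     private:
      T* slot_;
      T saved_;
    };

    int main() {
      bool rebuilding = false;
      {
        ScopedOverride<bool> scope(&rebuilding, true);
        assert(rebuilding);
      }
      assert(!rebuilding);  // Previous state is back, even on early returns.
    }
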
diff --git a/deps/v8/src/string.js b/deps/v8/src/string.js
index eb9aa3548..6115930b6 100644
--- a/deps/v8/src/string.js
+++ b/deps/v8/src/string.js
@@ -186,15 +186,11 @@ function StringMatch(regexp) {
}
var subject = TO_STRING_INLINE(this);
if (IS_REGEXP(regexp)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even though
- // the value is discarded.
- ToInteger(regexp.lastIndex);
if (!regexp.global) return RegExpExecNoTests(regexp, subject, 0);
%_Log('regexp', 'regexp-match,%0S,%1r', [subject, regexp]);
// lastMatchInfo is defined in regexp.js.
var result = %StringMatch(subject, regexp, lastMatchInfo);
if (result !== null) lastMatchInfoOverride = null;
- regexp.lastIndex = 0;
return result;
}
// Non-regexp argument.
@@ -231,9 +227,6 @@ function StringReplace(search, replace) {
// Delegate to one of the regular expression variants if necessary.
if (IS_REGEXP(search)) {
- // Emulate RegExp.prototype.exec's side effect in step 5, even though
- // the value is discarded.
- ToInteger(search.lastIndex);
%_Log('regexp', 'regexp-replace,%0r,%1S', [search, subject]);
if (IS_SPEC_FUNCTION(replace)) {
if (search.global) {
@@ -245,16 +238,10 @@ function StringReplace(search, replace) {
}
} else {
if (lastMatchInfoOverride == null) {
- var answer = %StringReplaceRegExpWithString(subject,
- search,
- TO_STRING_INLINE(replace),
- lastMatchInfo);
- if (IS_UNDEFINED(answer)) { // No match. Return subject string.
- search.lastIndex = 0;
- return subject;
- }
- if (search.global) search.lastIndex = 0;
- return answer;
+ return %StringReplaceRegExpWithString(subject,
+ search,
+ TO_STRING_INLINE(replace),
+ lastMatchInfo);
} else {
// We use this hack to detect whether StringReplaceRegExpWithString
// found at least one hit. In that case we need to remove any
@@ -265,17 +252,11 @@ function StringReplace(search, replace) {
search,
TO_STRING_INLINE(replace),
lastMatchInfo);
- if (IS_UNDEFINED(answer)) { // No match. Return subject string.
- search.lastIndex = 0;
- lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
- return subject;
- }
if (%_IsSmi(lastMatchInfo[LAST_SUBJECT_INDEX])) {
lastMatchInfo[LAST_SUBJECT_INDEX] = saved_subject;
} else {
lastMatchInfoOverride = null;
}
- if (search.global) search.lastIndex = 0;
return answer;
}
}
@@ -470,10 +451,7 @@ function StringReplaceGlobalRegExpWithFunction(subject, regexp, replace) {
function StringReplaceNonGlobalRegExpWithFunction(subject, regexp, replace) {
var matchInfo = DoRegExpExec(regexp, subject, 0);
- if (IS_NULL(matchInfo)) {
- regexp.lastIndex = 0;
- return subject;
- }
+ if (IS_NULL(matchInfo)) return subject;
var index = matchInfo[CAPTURE0];
var result = SubString(subject, 0, index);
var endOfMatch = matchInfo[CAPTURE1];
@@ -823,7 +801,6 @@ function StringTrimRight() {
var static_charcode_array = new InternalArray(4);
-
// ECMA-262, section 15.5.3.2
function StringFromCharCode(code) {
var n = %_ArgumentsLength();
@@ -832,25 +809,17 @@ function StringFromCharCode(code) {
return %_StringCharFromCode(code & 0xffff);
}
- var one_byte = %NewString(n, NEW_ONE_BYTE_STRING);
- var i;
- for (i = 0; i < n; i++) {
+ // NOTE: This is not super-efficient, but it is necessary because we
+ // want to avoid converting to numbers from within the virtual
+ // machine. Maybe we can find another way of doing this?
+ var codes = static_charcode_array;
+ for (var i = 0; i < n; i++) {
var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- if (code < 0) code = code & 0xffff;
- if (code > 0x7f) break;
- %_OneByteSeqStringSetChar(one_byte, i, code);
- }
- if (i == n) return one_byte;
- one_byte = %TruncateString(one_byte, i);
-
- var two_byte = %NewString(n - i, NEW_TWO_BYTE_STRING);
- for (var j = 0; i < n; i++, j++) {
- var code = %_Arguments(i);
- if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
- %_TwoByteSeqStringSetChar(two_byte, j, code);
+ if (!%_IsSmi(code)) code = ToNumber(code);
+ codes[i] = code;
}
- return one_byte + two_byte;
+ codes.length = n;
+ return %StringFromCharCodeArray(codes);
}
diff --git a/deps/v8/src/stub-cache.cc b/deps/v8/src/stub-cache.cc
index bfed6bbac..411914719 100644
--- a/deps/v8/src/stub-cache.cc
+++ b/deps/v8/src/stub-cache.cc
@@ -120,8 +120,7 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
// name specific if there are global objects involved.
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NONEXISTENT);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*cache_name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -137,11 +136,10 @@ Handle<Code> StubCache::ComputeLoadNonexistent(Handle<String> name,
Handle<Code> StubCache::ComputeLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field_index) {
+ int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -162,8 +160,7 @@ Handle<Code> StubCache::ComputeLoadCallback(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -183,8 +180,7 @@ Handle<Code> StubCache::ComputeLoadViaGetter(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -204,8 +200,7 @@ Handle<Code> StubCache::ComputeLoadConstant(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -224,8 +219,7 @@ Handle<Code> StubCache::ComputeLoadInterceptor(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -251,8 +245,7 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::LOAD_IC, Code::NORMAL);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
LoadStubCompiler compiler(isolate_);
@@ -268,12 +261,11 @@ Handle<Code> StubCache::ComputeLoadGlobal(Handle<String> name,
Handle<Code> StubCache::ComputeKeyedLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field_index) {
+ int field_index) {
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::FIELD);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -293,8 +285,7 @@ Handle<Code> StubCache::ComputeKeyedLoadConstant(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC,
Code::CONSTANT_FUNCTION);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -313,8 +304,7 @@ Handle<Code> StubCache::ComputeKeyedLoadInterceptor(Handle<String> name,
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::INTERCEPTOR);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -334,8 +324,7 @@ Handle<Code> StubCache::ComputeKeyedLoadCallback(
ASSERT(IC::GetCodeCacheForObject(*receiver, *holder) == OWN_MAP);
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -352,8 +341,7 @@ Handle<Code> StubCache::ComputeKeyedLoadArrayLength(Handle<String> name,
Handle<JSArray> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -370,7 +358,7 @@ Handle<Code> StubCache::ComputeKeyedLoadStringLength(Handle<String> name,
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
Handle<Map> map(receiver->map());
- Handle<Object> probe(map->FindInCodeCache(*name, flags), isolate_);
+ Handle<Object> probe(map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -387,8 +375,7 @@ Handle<Code> StubCache::ComputeKeyedLoadFunctionPrototype(
Handle<JSFunction> receiver) {
Code::Flags flags =
Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC, Code::CALLBACKS);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedLoadStubCompiler compiler(isolate_);
@@ -409,8 +396,7 @@ Handle<Code> StubCache::ComputeStoreField(Handle<String> name,
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -452,7 +438,7 @@ Handle<Code> StubCache::ComputeKeyedLoadOrStoreElement(
UNREACHABLE();
break;
}
- Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
+ Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
Handle<Code> code;
@@ -504,8 +490,7 @@ Handle<Code> StubCache::ComputeStoreGlobal(Handle<String> name,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::NORMAL, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -525,8 +510,7 @@ Handle<Code> StubCache::ComputeStoreCallback(Handle<String> name,
ASSERT(v8::ToCData<Address>(callback->setter()) != 0);
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -546,8 +530,7 @@ Handle<Code> StubCache::ComputeStoreViaSetter(Handle<String> name,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::CALLBACKS, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -565,8 +548,7 @@ Handle<Code> StubCache::ComputeStoreInterceptor(Handle<String> name,
StrictModeFlag strict_mode) {
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::STORE_IC, Code::INTERCEPTOR, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
StoreStubCompiler compiler(isolate_, strict_mode);
@@ -586,8 +568,7 @@ Handle<Code> StubCache::ComputeKeyedStoreField(Handle<String> name,
(transition.is_null()) ? Code::FIELD : Code::MAP_TRANSITION;
Code::Flags flags = Code::ComputeMonomorphicFlags(
Code::KEYED_STORE_IC, type, strict_mode);
- Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(receiver->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
KeyedStoreStubCompiler compiler(isolate(), strict_mode,
@@ -629,8 +610,7 @@ Handle<Code> StubCache::ComputeCallConstant(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::CONSTANT_FUNCTION, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -652,7 +632,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
Handle<String> name,
Handle<Object> object,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// Compute the check type and the map.
InlineCacheHolderFlag cache_holder =
IC::GetCodeCacheForObject(*object, *holder);
@@ -668,8 +648,7 @@ Handle<Code> StubCache::ComputeCallField(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::FIELD, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate_, argc, kind, extra_state, cache_holder);
@@ -706,8 +685,7 @@ Handle<Code> StubCache::ComputeCallInterceptor(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::INTERCEPTOR, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -737,8 +715,7 @@ Handle<Code> StubCache::ComputeCallGlobal(int argc,
Code::Flags flags =
Code::ComputeMonomorphicFlags(kind, Code::NORMAL, extra_state,
cache_holder, argc);
- Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags),
- isolate_);
+ Handle<Object> probe(map_holder->map()->FindInCodeCache(*name, flags));
if (probe->IsCode()) return Handle<Code>::cast(probe);
CallStubCompiler compiler(isolate(), argc, kind, extra_state, cache_holder);
@@ -770,8 +747,10 @@ Code* StubCache::FindCallInitialize(int argc,
CallICBase::Contextual::encode(mode == RelocInfo::CODE_TARGET_CONTEXT);
Code::Flags flags =
Code::ComputeFlags(kind, UNINITIALIZED, extra_state, Code::NORMAL, argc);
+
+ // Use raw_unchecked... so we don't get assert failures during GC.
UnseededNumberDictionary* dictionary =
- isolate()->heap()->non_monomorphic_cache();
+ isolate()->heap()->raw_unchecked_non_monomorphic_cache();
int entry = dictionary->FindEntry(isolate(), flags);
ASSERT(entry != -1);
Object* code = dictionary->ValueAt(entry);
@@ -1562,7 +1541,6 @@ int CallOptimization::GetPrototypeDepthOfExpectedType(
while (!object.is_identical_to(holder)) {
if (object->IsInstanceOf(*expected_receiver_type_)) return depth;
object = Handle<JSObject>(JSObject::cast(object->GetPrototype()));
- if (!object->map()->is_hidden_prototype()) return kInvalidProtoDepth;
++depth;
}
if (holder->IsInstanceOf(*expected_receiver_type_)) return depth;
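The change repeated throughout stub-cache.cc is mechanical: 3.14.5's Handle<Object> constructor takes no explicit isolate argument, so every probe loses its trailing ", isolate_". The surrounding logic is untouched and is the classic probe-then-compile cache: look the stub up by (name, flags) in the map's code cache and compile only on a miss. A minimal self-contained sketch of that pattern, with std::map standing in for the real code cache and all names illustrative rather than V8's:

    #include <map>
    #include <string>
    #include <utility>

    // Illustrative stand-in for a compiled stub.
    struct Code { std::string description; };

    // Probe-then-compile cache keyed by (name, flags): each stub is
    // compiled at most once per key and reused on later lookups.
    class CodeCache {
     public:
      Code* Probe(const std::string& name, int flags) {
        auto it = cache_.find(Key(name, flags));
        return it == cache_.end() ? nullptr : &it->second;
      }
      Code* Insert(const std::string& name, int flags, Code code) {
        return &(cache_[Key(name, flags)] = std::move(code));
      }
     private:
      static std::string Key(const std::string& name, int flags) {
        return name + "/" + std::to_string(flags);
      }
      std::map<std::string, Code> cache_;
    };

    Code* ComputeStub(CodeCache& cache, const std::string& name, int flags) {
      if (Code* hit = cache.Probe(name, flags)) return hit;  // cache hit
      Code fresh{"stub for " + name};  // cache miss: "compile" a new stub
      return cache.Insert(name, flags, std::move(fresh));
    }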
diff --git a/deps/v8/src/stub-cache.h b/deps/v8/src/stub-cache.h
index f858e4722..005c537ab 100644
--- a/deps/v8/src/stub-cache.h
+++ b/deps/v8/src/stub-cache.h
@@ -83,7 +83,7 @@ class StubCache {
Handle<Code> ComputeLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ int field_index);
Handle<Code> ComputeLoadCallback(Handle<String> name,
Handle<JSObject> receiver,
@@ -117,7 +117,7 @@ class StubCache {
Handle<Code> ComputeKeyedLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex field_index);
+ int field_index);
Handle<Code> ComputeKeyedLoadCallback(Handle<String> name,
Handle<JSObject> receiver,
@@ -193,7 +193,7 @@ class StubCache {
Handle<String> name,
Handle<Object> object,
Handle<JSObject> holder,
- PropertyIndex index);
+ int index);
Handle<Code> ComputeCallConstant(int argc,
Code::Kind,
@@ -453,7 +453,7 @@ class StubCompiler BASE_EMBEDDED {
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index);
+ int index);
static void GenerateLoadArrayLength(MacroAssembler* masm,
Register receiver,
@@ -540,7 +540,7 @@ class StubCompiler BASE_EMBEDDED {
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss);
@@ -611,7 +611,7 @@ class LoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name);
Handle<Code> CompileLoadCallback(Handle<String> name,
@@ -654,7 +654,7 @@ class KeyedLoadStubCompiler: public StubCompiler {
Handle<Code> CompileLoadField(Handle<String> name,
Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index);
+ int index);
Handle<Code> CompileLoadCallback(Handle<String> name,
Handle<JSObject> object,
@@ -803,7 +803,7 @@ class CallStubCompiler: public StubCompiler {
Handle<Code> CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name);
Handle<Code> CompileCallConstant(Handle<Object> object,
diff --git a/deps/v8/src/token.h b/deps/v8/src/token.h
index 863ba6285..3036e5512 100644
--- a/deps/v8/src/token.h
+++ b/deps/v8/src/token.h
@@ -99,7 +99,6 @@ namespace internal {
T(SHL, "<<", 11) \
T(SAR, ">>", 11) \
T(SHR, ">>>", 11) \
- T(ROR, "rotate right", 11) /* only used by Crankshaft */ \
T(ADD, "+", 12) \
T(SUB, "-", 12) \
T(MUL, "*", 13) \
diff --git a/deps/v8/src/type-info.cc b/deps/v8/src/type-info.cc
index 7a9a5de80..bc6a46b4b 100644
--- a/deps/v8/src/type-info.cc
+++ b/deps/v8/src/type-info.cc
@@ -79,7 +79,7 @@ static uint32_t IdToKey(TypeFeedbackId ast_id) {
Handle<Object> TypeFeedbackOracle::GetInfo(TypeFeedbackId ast_id) {
int entry = dictionary_->FindEntry(IdToKey(ast_id));
return entry != UnseededNumberDictionary::kNotFound
- ? Handle<Object>(dictionary_->ValueAt(entry), isolate_)
+ ? Handle<Object>(dictionary_->ValueAt(entry))
: Handle<Object>::cast(isolate_->factory()->undefined_value());
}
@@ -312,53 +312,43 @@ bool TypeFeedbackOracle::LoadIsBuiltin(Property* expr, Builtins::Name id) {
}
-static TypeInfo TypeFromCompareType(CompareIC::State state) {
+TypeInfo TypeFeedbackOracle::CompareType(CompareOperation* expr) {
+ Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
+ TypeInfo unknown = TypeInfo::Unknown();
+ if (!object->IsCode()) return unknown;
+ Handle<Code> code = Handle<Code>::cast(object);
+ if (!code->is_compare_ic_stub()) return unknown;
+
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
switch (state) {
case CompareIC::UNINITIALIZED:
// Uninitialized means never executed.
return TypeInfo::Uninitialized();
- case CompareIC::SMI:
+ case CompareIC::SMIS:
return TypeInfo::Smi();
- case CompareIC::HEAP_NUMBER:
+ case CompareIC::HEAP_NUMBERS:
return TypeInfo::Number();
- case CompareIC::SYMBOL:
- return TypeInfo::Symbol();
- case CompareIC::STRING:
+ case CompareIC::SYMBOLS:
+ case CompareIC::STRINGS:
return TypeInfo::String();
- case CompareIC::OBJECT:
+ case CompareIC::OBJECTS:
case CompareIC::KNOWN_OBJECTS:
// TODO(kasperl): We really need a type for JS objects here.
return TypeInfo::NonPrimitive();
case CompareIC::GENERIC:
default:
- return TypeInfo::Unknown();
+ return unknown;
}
}
-void TypeFeedbackOracle::CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type) {
+bool TypeFeedbackOracle::IsSymbolCompare(CompareOperation* expr) {
Handle<Object> object = GetInfo(expr->CompareOperationFeedbackId());
- TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) {
- *left_type = *right_type = *overall_type = unknown;
- return;
- }
+ if (!object->IsCode()) return false;
Handle<Code> code = Handle<Code>::cast(object);
- if (!code->is_compare_ic_stub()) {
- *left_type = *right_type = *overall_type = unknown;
- return;
- }
-
- int stub_minor_key = code->stub_info();
- CompareIC::State left_state, right_state, handler_state;
- ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
- &handler_state, NULL);
- *left_type = TypeFromCompareType(left_state);
- *right_type = TypeFromCompareType(right_state);
- *overall_type = TypeFromCompareType(handler_state);
+ if (!code->is_compare_ic_stub()) return false;
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ return state == CompareIC::SYMBOLS;
}
@@ -367,7 +357,7 @@ Handle<Map> TypeFeedbackOracle::GetCompareMap(CompareOperation* expr) {
if (!object->IsCode()) return Handle<Map>::null();
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return Handle<Map>::null();
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
if (state != CompareIC::KNOWN_OBJECTS) {
return Handle<Map>::null();
}
@@ -398,44 +388,55 @@ TypeInfo TypeFeedbackOracle::UnaryType(UnaryOperation* expr) {
}
-static TypeInfo TypeFromBinaryOpType(BinaryOpIC::TypeInfo binary_type) {
- switch (binary_type) {
- // Uninitialized means never executed.
- case BinaryOpIC::UNINITIALIZED: return TypeInfo::Uninitialized();
- case BinaryOpIC::SMI: return TypeInfo::Smi();
- case BinaryOpIC::INT32: return TypeInfo::Integer32();
- case BinaryOpIC::HEAP_NUMBER: return TypeInfo::Double();
- case BinaryOpIC::ODDBALL: return TypeInfo::Unknown();
- case BinaryOpIC::STRING: return TypeInfo::String();
- case BinaryOpIC::GENERIC: return TypeInfo::Unknown();
- }
- UNREACHABLE();
- return TypeInfo::Unknown();
-}
-
-
-void TypeFeedbackOracle::BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result) {
+TypeInfo TypeFeedbackOracle::BinaryType(BinaryOperation* expr) {
Handle<Object> object = GetInfo(expr->BinaryOperationFeedbackId());
TypeInfo unknown = TypeInfo::Unknown();
- if (!object->IsCode()) {
- *left = *right = *result = unknown;
- return;
- }
+ if (!object->IsCode()) return unknown;
Handle<Code> code = Handle<Code>::cast(object);
if (code->is_binary_op_stub()) {
- BinaryOpIC::TypeInfo left_type, right_type, result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &result_type);
- *left = TypeFromBinaryOpType(left_type);
- *right = TypeFromBinaryOpType(right_type);
- *result = TypeFromBinaryOpType(result_type);
- return;
+ BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+ code->binary_op_type());
+ BinaryOpIC::TypeInfo result_type = static_cast<BinaryOpIC::TypeInfo>(
+ code->binary_op_result_type());
+
+ switch (type) {
+ case BinaryOpIC::UNINITIALIZED:
+ // Uninitialized means never executed.
+ return TypeInfo::Uninitialized();
+ case BinaryOpIC::SMI:
+ switch (result_type) {
+ case BinaryOpIC::UNINITIALIZED:
+ if (expr->op() == Token::DIV) {
+ return TypeInfo::Double();
+ }
+ return TypeInfo::Smi();
+ case BinaryOpIC::SMI:
+ return TypeInfo::Smi();
+ case BinaryOpIC::INT32:
+ return TypeInfo::Integer32();
+ case BinaryOpIC::HEAP_NUMBER:
+ return TypeInfo::Double();
+ default:
+ return unknown;
+ }
+ case BinaryOpIC::INT32:
+ if (expr->op() == Token::DIV ||
+ result_type == BinaryOpIC::HEAP_NUMBER) {
+ return TypeInfo::Double();
+ }
+ return TypeInfo::Integer32();
+ case BinaryOpIC::HEAP_NUMBER:
+ return TypeInfo::Double();
+ case BinaryOpIC::BOTH_STRING:
+ return TypeInfo::String();
+ case BinaryOpIC::STRING:
+ case BinaryOpIC::GENERIC:
+ return unknown;
+ default:
+ return unknown;
+ }
}
- // Not a binary op stub.
- *left = *right = *result = unknown;
+ return unknown;
}
@@ -446,8 +447,28 @@ TypeInfo TypeFeedbackOracle::SwitchType(CaseClause* clause) {
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_compare_ic_stub()) return unknown;
- CompareIC::State state = ICCompareStub::CompareState(code->stub_info());
- return TypeFromCompareType(state);
+ CompareIC::State state = static_cast<CompareIC::State>(code->compare_state());
+ switch (state) {
+ case CompareIC::UNINITIALIZED:
+ // Uninitialized means never executed.
+ // TODO(fschneider): Introduce a separate value for never-executed ICs.
+ return unknown;
+ case CompareIC::SMIS:
+ return TypeInfo::Smi();
+ case CompareIC::STRINGS:
+ return TypeInfo::String();
+ case CompareIC::SYMBOLS:
+ return TypeInfo::Symbol();
+ case CompareIC::HEAP_NUMBERS:
+ return TypeInfo::Number();
+ case CompareIC::OBJECTS:
+ case CompareIC::KNOWN_OBJECTS:
+ // TODO(kasperl): We really need a type for JS objects here.
+ return TypeInfo::NonPrimitive();
+ case CompareIC::GENERIC:
+ default:
+ return unknown;
+ }
}
@@ -458,14 +479,9 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
Handle<Code> code = Handle<Code>::cast(object);
if (!code->is_binary_op_stub()) return unknown;
- BinaryOpIC::TypeInfo left_type, right_type, unused_result_type;
- BinaryOpStub::decode_types_from_minor_key(code->stub_info(), &left_type,
- &right_type, &unused_result_type);
- // CountOperations should always have +1 or -1 as their right input.
- ASSERT(right_type == BinaryOpIC::SMI ||
- right_type == BinaryOpIC::UNINITIALIZED);
-
- switch (left_type) {
+ BinaryOpIC::TypeInfo type = static_cast<BinaryOpIC::TypeInfo>(
+ code->binary_op_type());
+ switch (type) {
case BinaryOpIC::UNINITIALIZED:
case BinaryOpIC::SMI:
return TypeInfo::Smi();
@@ -473,6 +489,7 @@ TypeInfo TypeFeedbackOracle::IncrementType(CountOperation* expr) {
return TypeInfo::Integer32();
case BinaryOpIC::HEAP_NUMBER:
return TypeInfo::Double();
+ case BinaryOpIC::BOTH_STRING:
case BinaryOpIC::STRING:
case BinaryOpIC::GENERIC:
return unknown;
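The oracle changes above are the heart of the downgrade in this file: 3.15 decoded separate left/right/overall states out of the stub's minor key, while 3.14.5 reads one combined state directly off the code object (compare_state(), binary_op_type()) and maps it to a single TypeInfo. A hedged sketch of that one-state mapping, using simplified stand-in enums rather than the real CompareIC and TypeInfo declarations:

    // Simplified stand-ins for CompareIC::State and TypeInfo.
    enum class CompareState {
      UNINITIALIZED, SMIS, HEAP_NUMBERS, SYMBOLS, STRINGS,
      OBJECTS, KNOWN_OBJECTS, GENERIC
    };
    enum class Type {
      Uninitialized, Smi, Number, String, NonPrimitive, Unknown
    };

    // Mirrors the switch in TypeFeedbackOracle::CompareType: one
    // combined IC state yields one type, and GENERIC (or anything
    // unexpected) falls through to Unknown. In this V8 era "symbols"
    // are interned strings, hence SYMBOLS mapping to String.
    Type TypeFromCompareState(CompareState state) {
      switch (state) {
        case CompareState::UNINITIALIZED: return Type::Uninitialized;
        case CompareState::SMIS:          return Type::Smi;
        case CompareState::HEAP_NUMBERS:  return Type::Number;
        case CompareState::SYMBOLS:
        case CompareState::STRINGS:       return Type::String;
        case CompareState::OBJECTS:
        case CompareState::KNOWN_OBJECTS: return Type::NonPrimitive;
        case CompareState::GENERIC:
        default:                          return Type::Unknown;
      }
    }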
diff --git a/deps/v8/src/type-info.h b/deps/v8/src/type-info.h
index 8b2ec4931..00d88c2af 100644
--- a/deps/v8/src/type-info.h
+++ b/deps/v8/src/type-info.h
@@ -204,7 +204,6 @@ class TypeInfo {
kNonPrimitive = 0x40, // 1000000
kUninitialized = 0x7f // 1111111
};
-
explicit inline TypeInfo(Type t) : type_(t) { }
Type type_;
@@ -288,14 +287,9 @@ class TypeFeedbackOracle: public ZoneObject {
// Get type information for arithmetic operations and compares.
TypeInfo UnaryType(UnaryOperation* expr);
- void BinaryType(BinaryOperation* expr,
- TypeInfo* left,
- TypeInfo* right,
- TypeInfo* result);
- void CompareType(CompareOperation* expr,
- TypeInfo* left_type,
- TypeInfo* right_type,
- TypeInfo* overall_type);
+ TypeInfo BinaryType(BinaryOperation* expr);
+ TypeInfo CompareType(CompareOperation* expr);
+ bool IsSymbolCompare(CompareOperation* expr);
Handle<Map> GetCompareMap(CompareOperation* expr);
TypeInfo SwitchType(CaseClause* clause);
TypeInfo IncrementType(CountOperation* expr);
diff --git a/deps/v8/src/uri.js b/deps/v8/src/uri.js
index 1de22f8ae..b195f3da7 100644
--- a/deps/v8/src/uri.js
+++ b/deps/v8/src/uri.js
@@ -165,11 +165,11 @@ function URIDecodeOctets(octets, result, index) {
throw new $URIError("URI malformed");
}
if (value < 0x10000) {
- %_TwoByteSeqStringSetChar(result, index++, value);
+ result[index++] = value;
return index;
} else {
- %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
- %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
+ result[index++] = (value >> 10) + 0xd7c0;
+ result[index++] = (value & 0x3ff) + 0xdc00;
return index;
}
}
@@ -178,72 +178,43 @@ function URIDecodeOctets(octets, result, index) {
// ECMA-262, section 15.1.3
function Encode(uri, unescape) {
var uriLength = uri.length;
- var array = new InternalArray(uriLength);
+ // We are going to pass result to %StringFromCharCodeArray
+ // which does not expect any getters/setters installed
+ // on the incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
for (var k = 0; k < uriLength; k++) {
var cc1 = uri.charCodeAt(k);
if (unescape(cc1)) {
- array[index++] = cc1;
+ result[index++] = cc1;
} else {
if (cc1 >= 0xDC00 && cc1 <= 0xDFFF) throw new $URIError("URI malformed");
if (cc1 < 0xD800 || cc1 > 0xDBFF) {
- index = URIEncodeSingle(cc1, array, index);
+ index = URIEncodeSingle(cc1, result, index);
} else {
k++;
if (k == uriLength) throw new $URIError("URI malformed");
var cc2 = uri.charCodeAt(k);
if (cc2 < 0xDC00 || cc2 > 0xDFFF) throw new $URIError("URI malformed");
- index = URIEncodePair(cc1, cc2, array, index);
+ index = URIEncodePair(cc1, cc2, result, index);
}
}
}
-
- var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
- for (var i = 0; i < array.length; i++) {
- %_OneByteSeqStringSetChar(result, i, array[i]);
- }
- return result;
+ return %StringFromCharCodeArray(result);
}
// ECMA-262, section 15.1.3
function Decode(uri, reserved) {
var uriLength = uri.length;
- var one_byte = %NewString(uriLength, NEW_ONE_BYTE_STRING);
+ // We are going to pass result to %StringFromCharCodeArray
+ // which does not expect any getters/setters installed
+ // on the incoming array.
+ var result = new InternalArray(uriLength);
var index = 0;
- var k = 0;
-
- // Optimistically assume ascii string.
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
- if (k + 2 >= uriLength) throw new $URIError("URI malformed");
- var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
- if (cc >> 7) break; // Assumption wrong, two byte string.
- if (reserved(cc)) {
- %_OneByteSeqStringSetChar(one_byte, index++, 37); // '%'.
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
- %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
- } else {
- %_OneByteSeqStringSetChar(one_byte, index++, cc);
- }
- k += 2;
- } else {
- if (code > 0x7f) break; // Assumption wrong, two byte string.
- %_OneByteSeqStringSetChar(one_byte, index++, code);
- }
- }
-
- one_byte = %TruncateString(one_byte, index);
- if (k == uriLength) return one_byte;
-
- // Write into two byte string.
- var two_byte = %NewString(uriLength - k, NEW_TWO_BYTE_STRING);
- index = 0;
-
- for ( ; k < uriLength; k++) {
- var code = uri.charCodeAt(k);
- if (code == 37) { // '%'
+ for (var k = 0; k < uriLength; k++) {
+ var ch = uri.charAt(k);
+ if (ch == '%') {
if (k + 2 >= uriLength) throw new $URIError("URI malformed");
var cc = URIHexCharsToCharCode(uri.charCodeAt(++k), uri.charCodeAt(++k));
if (cc >> 7) {
@@ -258,21 +229,22 @@ function Decode(uri, reserved) {
octets[i] = URIHexCharsToCharCode(uri.charCodeAt(++k),
uri.charCodeAt(++k));
}
- index = URIDecodeOctets(octets, two_byte, index);
- } else if (reserved(cc)) {
- %_TwoByteSeqStringSetChar(two_byte, index++, 37); // '%'.
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
- %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
+ index = URIDecodeOctets(octets, result, index);
} else {
- %_TwoByteSeqStringSetChar(two_byte, index++, cc);
+ if (reserved(cc)) {
+ result[index++] = 37; // Char code of '%'.
+ result[index++] = uri.charCodeAt(k - 1);
+ result[index++] = uri.charCodeAt(k);
+ } else {
+ result[index++] = cc;
+ }
}
} else {
- %_TwoByteSeqStringSetChar(two_byte, index++, code);
+ result[index++] = ch.charCodeAt(0);
}
}
-
- two_byte = %TruncateString(two_byte, index);
- return one_byte + two_byte;
+ result.length = index;
+ return %StringFromCharCodeArray(result);
}
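The restored URIDecodeOctets writes code points above U+FFFF as a UTF-16 surrogate pair with (value >> 10) + 0xd7c0 and (value & 0x3ff) + 0xdc00; these are the standard lead/trail formulas with the 0x10000 bias folded into the lead constant (0xd7c0 == 0xd800 - (0x10000 >> 10)). A standalone arithmetic check of those constants (written in C++ for consistency with the other sketches here; it is not V8 code):

    #include <cassert>
    #include <cstdint>

    // Split a supplementary-plane code point into its UTF-16 surrogate
    // pair using the same folded constants as uri.js.
    void ToSurrogates(uint32_t value, uint16_t* lead, uint16_t* trail) {
      assert(value >= 0x10000 && value <= 0x10ffff);
      *lead = static_cast<uint16_t>((value >> 10) + 0xd7c0);
      *trail = static_cast<uint16_t>((value & 0x3ff) + 0xdc00);
    }

    int main() {
      uint16_t lead, trail;
      ToSurrogates(0x1f600, &lead, &trail);  // U+1F600
      assert(lead == 0xd83d && trail == 0xde00);
      return 0;
    }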
diff --git a/deps/v8/src/v8-counters.cc b/deps/v8/src/v8-counters.cc
index 4107dd3e4..3f83dffca 100644
--- a/deps/v8/src/v8-counters.cc
+++ b/deps/v8/src/v8-counters.cc
@@ -86,6 +86,17 @@ Counters::Counters() {
size_of_FIXED_ARRAY_##name##_ = size_of_FIXED_ARRAY_##name;
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+
+ StatsCounter state_counters[] = {
+#define COUNTER_NAME(name) \
+ { "c:V8.State" #name, NULL, false },
+ STATE_TAG_LIST(COUNTER_NAME)
+#undef COUNTER_NAME
+ };
+
+ for (int i = 0; i < kSlidingStateWindowCounterCount; ++i) {
+ state_counters_[i] = state_counters[i];
+ }
}
void Counters::ResetHistograms() {
diff --git a/deps/v8/src/v8-counters.h b/deps/v8/src/v8-counters.h
index 986e6dd6b..fad345481 100644
--- a/deps/v8/src/v8-counters.h
+++ b/deps/v8/src/v8-counters.h
@@ -50,6 +50,7 @@ namespace internal {
HT(compile_eval, V8.CompileEval) \
HT(compile_lazy, V8.CompileLazy)
+
#define HISTOGRAM_PERCENTAGE_LIST(HP) \
HP(external_fragmentation_total, \
V8.MemoryExternalFragmentationTotal) \
@@ -373,9 +374,16 @@ class Counters {
kSizeOfFIXED_ARRAY__##name,
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(COUNTER_ID)
#undef COUNTER_ID
+#define COUNTER_ID(name) k_##name,
+ STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
stats_counter_count
};
+ StatsCounter* state_counters(StateTag state) {
+ return &state_counters_[state];
+ }
+
void ResetHistograms();
private:
@@ -418,6 +426,15 @@ class Counters {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(SC)
#undef SC
+ enum {
+#define COUNTER_ID(name) __##name,
+ STATE_TAG_LIST(COUNTER_ID)
+#undef COUNTER_ID
+ kSlidingStateWindowCounterCount
+ };
+
+ // Sliding state window counters.
+ StatsCounter state_counters_[kSlidingStateWindowCounterCount];
friend class Isolate;
DISALLOW_IMPLICIT_CONSTRUCTORS(Counters);
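Both halves of this counters change hang off the restored STATE_TAG_LIST X-macro: the same list expands into the StateTag enum in v8globals.h, the per-state counter IDs, and the "c:V8.State<name>" counter names, so the three can never drift out of sync. A minimal self-contained sketch of the X-macro technique (the list entries are a subset chosen for illustration):

    #include <cstdio>

    // One authoritative list; every expansion site supplies its own V().
    #define STATE_TAG_LIST(V) \
      V(JS)                   \
      V(GC)                   \
      V(COMPILER)             \
      V(OTHER)

    // Expansion 1: the enum of states.
    enum StateTag {
    #define DEF_STATE_TAG(name) name,
      STATE_TAG_LIST(DEF_STATE_TAG)
    #undef DEF_STATE_TAG
      state_tag_count  // pseudo-entry: number of real states
    };

    // Expansion 2: a parallel array of counter names, always in sync
    // with the enum because both come from the same list.
    static const char* kStateCounterNames[state_tag_count] = {
    #define COUNTER_NAME(name) "c:V8.State" #name,
      STATE_TAG_LIST(COUNTER_NAME)
    #undef COUNTER_NAME
    };

    int main() {
      for (int i = 0; i < state_tag_count; ++i)
        std::printf("%d -> %s\n", i, kStateCounterNames[i]);
      return 0;
    }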
diff --git a/deps/v8/src/v8.cc b/deps/v8/src/v8.cc
index b5aad9022..2407037b3 100644
--- a/deps/v8/src/v8.cc
+++ b/deps/v8/src/v8.cc
@@ -38,7 +38,6 @@
#include "hydrogen.h"
#include "lithium-allocator.h"
#include "log.h"
-#include "objects.h"
#include "once.h"
#include "platform.h"
#include "runtime-profiler.h"
@@ -115,7 +114,6 @@ void V8::TearDown() {
ElementsAccessor::TearDown();
LOperand::TearDownCaches();
- ExternalReference::TearDownMathExpData();
RegisteredExtension::UnregisterAll();
is_running_ = false;
@@ -218,22 +216,14 @@ void V8::RemoveCallCompletedCallback(CallCompletedCallback callback) {
void V8::FireCallCompletedCallback(Isolate* isolate) {
- bool has_call_completed_callbacks = call_completed_callbacks_ != NULL;
- bool observer_delivery_pending =
- FLAG_harmony_observation && isolate->observer_delivery_pending();
- if (!has_call_completed_callbacks && !observer_delivery_pending) return;
+ if (call_completed_callbacks_ == NULL) return;
HandleScopeImplementer* handle_scope_implementer =
isolate->handle_scope_implementer();
if (!handle_scope_implementer->CallDepthIsZero()) return;
// Fire callbacks. Increase call depth to prevent recursive callbacks.
handle_scope_implementer->IncrementCallDepth();
- if (observer_delivery_pending) {
- JSObject::DeliverChangeRecords(isolate);
- }
- if (has_call_completed_callbacks) {
- for (int i = 0; i < call_completed_callbacks_->length(); i++) {
- call_completed_callbacks_->at(i)();
- }
+ for (int i = 0; i < call_completed_callbacks_->length(); i++) {
+ call_completed_callbacks_->at(i)();
}
handle_scope_implementer->DecrementCallDepth();
}
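Even after dropping the harmony-observation delivery, FireCallCompletedCallback keeps the depth-guard idiom: callbacks fire only once the API call depth is back at zero, and the depth is raised around the loop so a callback that re-enters V8 cannot fire the list recursively. That guard in isolation, as a hedged sketch with illustrative names:

    #include <functional>
    #include <utility>
    #include <vector>

    // Reentrancy-guarded notification: callbacks run only at depth
    // zero, and the depth is incremented while they run so re-entrant
    // calls made from inside a callback cannot trigger the list again.
    class CallCompletedNotifier {
     public:
      void Add(std::function<void()> callback) {
        callbacks_.push_back(std::move(callback));
      }

      void FireIfIdle() {
        if (callbacks_.empty()) return;  // nothing registered
        if (depth_ != 0) return;         // still inside an outer call
        ++depth_;                        // block recursive firing
        for (auto& callback : callbacks_) callback();
        --depth_;
      }

     private:
      int depth_ = 0;
      std::vector<std::function<void()>> callbacks_;
    };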
diff --git a/deps/v8/src/v8conversions.cc b/deps/v8/src/v8conversions.cc
index c6755d593..bf175e50b 100644
--- a/deps/v8/src/v8conversions.cc
+++ b/deps/v8/src/v8conversions.cc
@@ -84,7 +84,7 @@ double StringToDouble(UnicodeCache* unicode_cache,
String* str, int flags, double empty_string_val) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
- const char* begin = SeqOneByteString::cast(str)->GetChars();
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
@@ -109,7 +109,7 @@ double StringToInt(UnicodeCache* unicode_cache,
int radix) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
- const char* begin = SeqOneByteString::cast(str)->GetChars();
+ const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
diff --git a/deps/v8/src/v8globals.h b/deps/v8/src/v8globals.h
index 5920d33aa..95390adcf 100644
--- a/deps/v8/src/v8globals.h
+++ b/deps/v8/src/v8globals.h
@@ -351,13 +351,20 @@ struct AccessorDescriptor {
// VMState object leaves a state by popping the current state from the
// stack.
+#define STATE_TAG_LIST(V) \
+ V(JS) \
+ V(GC) \
+ V(COMPILER) \
+ V(PARALLEL_COMPILER_PROLOGUE) \
+ V(OTHER) \
+ V(EXTERNAL)
+
enum StateTag {
- JS,
- GC,
- COMPILER,
- PARALLEL_COMPILER,
- OTHER,
- EXTERNAL
+#define DEF_STATE_TAG(name) name,
+ STATE_TAG_LIST(DEF_STATE_TAG)
+#undef DEF_STATE_TAG
+ // Pseudo-types.
+ state_tag_count
};
@@ -476,19 +483,11 @@ enum VariableMode {
CONST, // declared via 'const' declarations
- LET, // declared via 'let' declarations (first lexical)
+ LET, // declared via 'let' declarations
CONST_HARMONY, // declared via 'const' declarations in harmony mode
- MODULE, // declared via 'module' declaration (last lexical)
-
// Variables introduced by the compiler:
- INTERNAL, // like VAR, but not user-visible (may or may not
- // be in a context)
-
- TEMPORARY, // temporary variables (not user-visible), never
- // in a context
-
DYNAMIC, // always require dynamic lookup (we don't know
// the declaration)
@@ -496,10 +495,16 @@ enum VariableMode {
// variable is global unless it has been shadowed
// by an eval-introduced variable
- DYNAMIC_LOCAL // requires dynamic lookup, but we know that the
+ DYNAMIC_LOCAL, // requires dynamic lookup, but we know that the
// variable is local and where it is unless it
// has been shadowed by an eval-introduced
// variable
+
+ INTERNAL, // like VAR, but not user-visible (may or may not
+ // be in a context)
+
+ TEMPORARY // temporary variables (not user-visible), never
+ // in a context
};
@@ -509,17 +514,17 @@ inline bool IsDynamicVariableMode(VariableMode mode) {
inline bool IsDeclaredVariableMode(VariableMode mode) {
- return mode >= VAR && mode <= MODULE;
+ return mode >= VAR && mode <= CONST_HARMONY;
}
inline bool IsLexicalVariableMode(VariableMode mode) {
- return mode >= LET && mode <= MODULE;
+ return mode >= LET && mode <= CONST_HARMONY;
}
inline bool IsImmutableVariableMode(VariableMode mode) {
- return mode == CONST || (mode >= CONST_HARMONY && mode <= MODULE);
+ return mode == CONST || mode == CONST_HARMONY;
}
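The VariableMode reshuffle is load-bearing because the predicates below it are plain range comparisons over the enum's declaration order; dropping MODULE and moving the compiler-internal modes past the ranges is what lets IsDeclaredVariableMode and IsLexicalVariableMode shrink to the CONST_HARMONY bounds. A compact illustration of that range-check idiom (a simplified mode list, not the full one above):

    // Contiguous enum whose declaration order encodes the categories;
    // the predicates below stay correct only while this order holds.
    enum VariableMode {
      VAR,            // declared, non-lexical
      CONST,
      LET,            // first lexical mode
      CONST_HARMONY,  // last declared and last lexical mode
      DYNAMIC,        // lookup-only modes
      DYNAMIC_GLOBAL,
      DYNAMIC_LOCAL,
      INTERNAL,       // compiler-introduced, deliberately after the ranges
      TEMPORARY
    };

    inline bool IsDeclaredVariableMode(VariableMode mode) {
      return mode >= VAR && mode <= CONST_HARMONY;
    }

    inline bool IsLexicalVariableMode(VariableMode mode) {
      return mode >= LET && mode <= CONST_HARMONY;
    }

    static_assert(CONST < LET && LET < CONST_HARMONY,
                  "the range predicates depend on declaration order");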
diff --git a/deps/v8/src/v8natives.js b/deps/v8/src/v8natives.js
index e8752c84b..e2e642941 100644
--- a/deps/v8/src/v8natives.js
+++ b/deps/v8/src/v8natives.js
@@ -60,17 +60,7 @@ function InstallFunctions(object, attributes, functions) {
%ToFastProperties(object);
}
-
-// Helper function to install a getter only property.
-function InstallGetter(object, name, getter) {
- %FunctionSetName(getter, name);
- %FunctionRemovePrototype(getter);
- %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM);
- %SetNativeFlag(getter);
-}
-
-
-// Prevents changes to the prototype of a built-in function.
+// Prevents changes to the prototype of a built-in function.
// The "prototype" property of the function object is made non-configurable,
// and the prototype object is made non-extensible. The latter prevents
// changing the __proto__ property.
@@ -970,7 +960,7 @@ function ToStringArray(obj, trap) {
}
var n = ToUint32(obj.length);
var array = new $Array(n);
- var names = { __proto__: null }; // TODO(rossberg): use sets once ready.
+ var names = {}; // TODO(rossberg): use sets once they are ready.
for (var index = 0; index < n; index++) {
var s = ToString(obj[index]);
if (%HasLocalProperty(names, s)) {
@@ -1025,7 +1015,7 @@ function ObjectGetOwnPropertyNames(obj) {
}
// Property names are expected to be unique strings.
- var propertySet = { __proto__: null };
+ var propertySet = {};
var j = 0;
for (var i = 0; i < propertyNames.length; ++i) {
var name = ToString(propertyNames[i]);
@@ -1066,7 +1056,7 @@ function ObjectDefineProperty(obj, p, attributes) {
// Clone the attributes object for protection.
// TODO(rossberg): not spec'ed yet, so not sure if this should involve
// non-own properties as it does (or non-enumerable ones, as it doesn't?).
- var attributesClone = { __proto__: null };
+ var attributesClone = {};
for (var a in attributes) {
attributesClone[a] = attributes[a];
}
@@ -1413,7 +1403,11 @@ function NumberToString(radix) {
// ECMA-262 section 15.7.4.3
function NumberToLocaleString() {
- return %_CallFunction(this, NumberToString);
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toLocaleString"]);
+ }
+ return this.toString();
}
@@ -1430,76 +1424,50 @@ function NumberValueOf() {
// ECMA-262 section 15.7.4.5
function NumberToFixed(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toFixed", this]);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
- }
var f = TO_INTEGER(fractionDigits);
-
if (f < 0 || f > 20) {
throw new $RangeError("toFixed() digits argument must be between 0 and 20");
}
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toFixed"]);
+ }
+ var x = ToNumber(this);
return %NumberToFixed(x, f);
}
// ECMA-262 section 15.7.4.6
function NumberToExponential(fractionDigits) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toExponential", this]);
+ var f = -1;
+ if (!IS_UNDEFINED(fractionDigits)) {
+ f = TO_INTEGER(fractionDigits);
+ if (f < 0 || f > 20) {
+ throw new $RangeError(
+ "toExponential() argument must be between 0 and 20");
}
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
}
- var f = IS_UNDEFINED(fractionDigits) ? void 0 : TO_INTEGER(fractionDigits);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
- if (IS_UNDEFINED(f)) {
- f = -1; // Signal for runtime function that f is not defined.
- } else if (f < 0 || f > 20) {
- throw new $RangeError("toExponential() argument must be between 0 and 20");
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toExponential"]);
}
+ var x = ToNumber(this);
return %NumberToExponential(x, f);
}
// ECMA-262 section 15.7.4.7
function NumberToPrecision(precision) {
- var x = this;
- if (!IS_NUMBER(this)) {
- if (!IS_NUMBER_WRAPPER(this)) {
- throw MakeTypeError("incompatible_method_receiver",
- ["Number.prototype.toPrecision", this]);
- }
- // Get the value of this number in case it's an object.
- x = %_ValueOf(this);
+ if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
+ throw MakeTypeError("called_on_null_or_undefined",
+ ["Number.prototype.toPrecision"]);
}
if (IS_UNDEFINED(precision)) return ToString(%_ValueOf(this));
var p = TO_INTEGER(precision);
-
- if (NUMBER_IS_NAN(x)) return "NaN";
- if (x == 1/0) return "Infinity";
- if (x == -1/0) return "-Infinity";
-
if (p < 1 || p > 21) {
throw new $RangeError("toPrecision() argument must be between 1 and 21");
}
+ var x = ToNumber(this);
return %NumberToPrecision(x, p);
}
diff --git a/deps/v8/src/v8utils.h b/deps/v8/src/v8utils.h
index 111abdf8b..9072b4e28 100644
--- a/deps/v8/src/v8utils.h
+++ b/deps/v8/src/v8utils.h
@@ -209,8 +209,6 @@ INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
template <typename sourcechar, typename sinkchar>
void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
- ASSERT(chars >= 0);
- if (chars == 0) return;
sinkchar* limit = dest + chars;
#ifdef V8_HOST_CAN_READ_UNALIGNED
if (sizeof(*dest) == sizeof(*src)) {
diff --git a/deps/v8/src/variables.cc b/deps/v8/src/variables.cc
index 916121791..0416f3a39 100644
--- a/deps/v8/src/variables.cc
+++ b/deps/v8/src/variables.cc
@@ -41,9 +41,8 @@ const char* Variable::Mode2String(VariableMode mode) {
switch (mode) {
case VAR: return "VAR";
case CONST: return "CONST";
- case LET: return "LET";
case CONST_HARMONY: return "CONST_HARMONY";
- case MODULE: return "MODULE";
+ case LET: return "LET";
case DYNAMIC: return "DYNAMIC";
case DYNAMIC_GLOBAL: return "DYNAMIC_GLOBAL";
case DYNAMIC_LOCAL: return "DYNAMIC_LOCAL";
@@ -85,8 +84,7 @@ Variable::Variable(Scope* scope,
bool Variable::IsGlobalObjectProperty() const {
// Temporaries are never global, they must always be allocated in the
// activation frame.
- return (IsDynamicVariableMode(mode_) ||
- (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
+ return mode_ != TEMPORARY && !IsLexicalVariableMode(mode_)
&& scope_ != NULL && scope_->is_global_scope();
}
diff --git a/deps/v8/src/variables.h b/deps/v8/src/variables.h
index bb35ee88b..ba26b8047 100644
--- a/deps/v8/src/variables.h
+++ b/deps/v8/src/variables.h
@@ -130,8 +130,8 @@ class Variable: public ZoneObject {
bool is_arguments() const { return kind_ == ARGUMENTS; }
// True if the variable is named eval and not known to be shadowed.
- bool is_possibly_eval(Isolate* isolate) const {
- return IsVariable(isolate->factory()->eval_symbol());
+ bool is_possibly_eval() const {
+ return IsVariable(FACTORY->eval_symbol());
}
Variable* local_if_not_shadowed() const {
diff --git a/deps/v8/src/version.cc b/deps/v8/src/version.cc
index 31d93d678..213259f5f 100644
--- a/deps/v8/src/version.cc
+++ b/deps/v8/src/version.cc
@@ -33,9 +33,9 @@
// NOTE these macros are used by the SCons build script so their names
// cannot be changed without changing the SCons build script.
#define MAJOR_VERSION 3
-#define MINOR_VERSION 15
-#define BUILD_NUMBER 11
-#define PATCH_LEVEL 15
+#define MINOR_VERSION 14
+#define BUILD_NUMBER 5
+#define PATCH_LEVEL 0
// Use 1 for candidates and 0 otherwise.
// (Boolean macro values are not supported by all preprocessors.)
#define IS_CANDIDATE_VERSION 0
diff --git a/deps/v8/src/vm-state-inl.h b/deps/v8/src/vm-state-inl.h
index bc481605f..384940dfa 100644
--- a/deps/v8/src/vm-state-inl.h
+++ b/deps/v8/src/vm-state-inl.h
@@ -47,8 +47,8 @@ inline const char* StateToString(StateTag state) {
return "GC";
case COMPILER:
return "COMPILER";
- case PARALLEL_COMPILER:
- return "PARALLEL_COMPILER";
+ case PARALLEL_COMPILER_PROLOGUE:
+ return "PARALLEL_COMPILER_PROLOGUE";
case OTHER:
return "OTHER";
case EXTERNAL:
@@ -67,10 +67,6 @@ VMState::VMState(Isolate* isolate, StateTag tag)
LOG(isolate, UncheckedStringEvent("From", StateToString(previous_tag_)));
}
- if (FLAG_log_timer_events) {
- LOG(isolate, ExternalSwitch(previous_tag_, tag));
- }
-
isolate_->SetCurrentVMState(tag);
}
@@ -84,10 +80,6 @@ VMState::~VMState() {
UncheckedStringEvent("To", StateToString(previous_tag_)));
}
- if (FLAG_log_timer_events) {
- LOG(isolate_, ExternalSwitch(isolate_->current_vm_state(), previous_tag_));
- }
-
isolate_->SetCurrentVMState(previous_tag_);
}
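VMState is the RAII half of the StateTag machinery described in v8globals.h: the constructor remembers the current tag and installs the new one, and the destructor restores the previous tag, so nested states unwind like a stack. That shape in miniature, with the isolate and logging stripped out and a global standing in for isolate->current_vm_state():

    #include <cstdio>

    enum StateTag { JS, GC, COMPILER, OTHER, EXTERNAL };

    StateTag g_current_state = OTHER;  // stand-in for the isolate's state

    // RAII scope: construction pushes a state, destruction pops back to
    // the previous one, so nested VMState objects behave like a stack.
    class VMState {
     public:
      explicit VMState(StateTag tag) : previous_tag_(g_current_state) {
        g_current_state = tag;
      }
      ~VMState() { g_current_state = previous_tag_; }

     private:
      StateTag previous_tag_;
    };

    int main() {
      VMState js(JS);  // OTHER -> JS
      {
        VMState gc(GC);  // JS -> GC
        std::printf("during GC scope: %d\n", g_current_state);
      }  // GC -> JS
      std::printf("after GC scope: %d\n", g_current_state);
      return 0;
    }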
diff --git a/deps/v8/src/x64/assembler-x64-inl.h b/deps/v8/src/x64/assembler-x64-inl.h
index f86417469..d022340c1 100644
--- a/deps/v8/src/x64/assembler-x64-inl.h
+++ b/deps/v8/src/x64/assembler-x64-inl.h
@@ -42,9 +42,6 @@ namespace internal {
// Implementation of Assembler
-static const byte kCallOpcode = 0xE8;
-
-
void Assembler::emitl(uint32_t x) {
Memory::uint32_at(pc_) = x;
pc_ += sizeof(uint32_t);
@@ -220,12 +217,6 @@ void RelocInfo::apply(intptr_t delta) {
} else if (IsCodeTarget(rmode_)) {
Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
CPU::FlushICache(pc_, sizeof(int32_t));
- } else if (rmode_ == CODE_AGE_SEQUENCE) {
- if (*pc_ == kCallOpcode) {
- int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
- *p -= static_cast<int32_t>(delta); // Relocate entry.
- CPU::FlushICache(p, sizeof(uint32_t));
- }
}
}
@@ -364,21 +355,6 @@ bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
}
-Code* RelocInfo::code_age_stub() {
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- ASSERT(*pc_ == kCallOpcode);
- return Code::GetCodeFromTargetAddress(
- Assembler::target_address_at(pc_ + 1));
-}
-
-
-void RelocInfo::set_code_age_stub(Code* stub) {
- ASSERT(*pc_ == kCallOpcode);
- ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
- Assembler::set_target_address_at(pc_ + 1, stub->instruction_start());
-}
-
-
Address RelocInfo::call_address() {
ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
(IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
@@ -432,8 +408,6 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- visitor->VisitCodeAgeSequence(this);
#ifdef ENABLE_DEBUGGER_SUPPORT
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
@@ -462,8 +436,6 @@ void RelocInfo::Visit(Heap* heap) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(this);
CPU::FlushICache(pc_, sizeof(Address));
- } else if (RelocInfo::IsCodeAgeSequence(mode)) {
- StaticVisitor::VisitCodeAgeSequence(heap, this);
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
diff --git a/deps/v8/src/x64/assembler-x64.cc b/deps/v8/src/x64/assembler-x64.cc
index f136b6559..862a73557 100644
--- a/deps/v8/src/x64/assembler-x64.cc
+++ b/deps/v8/src/x64/assembler-x64.cc
@@ -346,20 +346,50 @@ bool Operand::AddressUsesRegister(Register reg) const {
static void InitCoverageLog();
#endif
-Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
- : AssemblerBase(isolate, buffer, buffer_size),
+Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
+ : AssemblerBase(arg_isolate),
code_targets_(100),
- positions_recorder_(this) {
+ positions_recorder_(this),
+ emit_debug_code_(FLAG_debug_code),
+ predictable_code_size_(false) {
+ if (buffer == NULL) {
+ // Do our own buffer management.
+ if (buffer_size <= kMinimalBufferSize) {
+ buffer_size = kMinimalBufferSize;
+
+ if (isolate() != NULL && isolate()->assembler_spare_buffer() != NULL) {
+ buffer = isolate()->assembler_spare_buffer();
+ isolate()->set_assembler_spare_buffer(NULL);
+ }
+ }
+ if (buffer == NULL) {
+ buffer_ = NewArray<byte>(buffer_size);
+ } else {
+ buffer_ = static_cast<byte*>(buffer);
+ }
+ buffer_size_ = buffer_size;
+ own_buffer_ = true;
+ } else {
+ // Use externally provided buffer instead.
+ ASSERT(buffer_size > 0);
+ buffer_ = static_cast<byte*>(buffer);
+ buffer_size_ = buffer_size;
+ own_buffer_ = false;
+ }
+
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it.
#ifdef DEBUG
if (own_buffer_) {
- memset(buffer_, 0xCC, buffer_size_); // int3
+ memset(buffer_, 0xCC, buffer_size); // int3
}
#endif
- reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+ // Set up buffer pointers.
+ ASSERT(buffer_ != NULL);
+ pc_ = buffer_;
+ reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
#ifdef GENERATED_CODE_COVERAGE
@@ -368,6 +398,19 @@ Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
}
+Assembler::~Assembler() {
+ if (own_buffer_) {
+ if (isolate() != NULL &&
+ isolate()->assembler_spare_buffer() == NULL &&
+ buffer_size_ == kMinimalBufferSize) {
+ isolate()->set_assembler_spare_buffer(buffer_);
+ } else {
+ DeleteArray(buffer_);
+ }
+ }
+}
+
+
void Assembler::GetCode(CodeDesc* desc) {
// Finalize code (at this point overflow() may be true, but the gap ensures
// that we are still not overlapping instructions and relocation info).
@@ -1195,13 +1238,13 @@ void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
// Determine whether we can use 1-byte offsets for backwards branches,
// which have a max range of 128 bytes.
- // We also need to check predictable_code_size() flag here, because on x64,
- // when the full code generator recompiles code for debugging, some places
- // need to be padded out to a certain size. The debugger is keeping track of
- // how often it did this so that it can adjust return addresses on the
- // stack, but if the size of jump instructions can also change, that's not
- // enough and the calculated offsets would be incorrect.
- if (is_int8(offs - short_size) && !predictable_code_size()) {
+ // We also need to check the predictable_code_size_ flag here, because
+ // on x64, when the full code generator recompiles code for debugging, some
+ // places need to be padded out to a certain size. The debugger is keeping
+ // track of how often it did this so that it can adjust return addresses on
+ // the stack, but if the size of jump instructions can also change, that's
+ // not enough and the calculated offsets would be incorrect.
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 0111 tttn #8-bit disp.
emit(0x70 | cc);
emit((offs - short_size) & 0xFF);
@@ -1258,7 +1301,7 @@ void Assembler::jmp(Label* L, Label::Distance distance) {
if (L->is_bound()) {
int offs = L->pos() - pc_offset() - 1;
ASSERT(offs <= 0);
- if (is_int8(offs - short_size) && !predictable_code_size()) {
+ if (is_int8(offs - short_size) && !predictable_code_size_) {
// 1110 1011 #8-bit disp.
emit(0xEB);
emit((offs - short_size) & 0xFF);
@@ -2807,16 +2850,6 @@ void Assembler::addsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::addsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x58);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2827,16 +2860,6 @@ void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
}
-void Assembler::mulsd(XMMRegister dst, const Operand& src) {
- EnsureSpace ensure_space(this);
- emit(0xF2);
- emit_optional_rex_32(dst, src);
- emit(0x0F);
- emit(0x59);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::subsd(XMMRegister dst, XMMRegister src) {
EnsureSpace ensure_space(this);
emit(0xF2);
@@ -2951,15 +2974,6 @@ void Assembler::movmskpd(Register dst, XMMRegister src) {
}
-void Assembler::movmskps(Register dst, XMMRegister src) {
- EnsureSpace ensure_space(this);
- emit_optional_rex_32(dst, src);
- emit(0x0f);
- emit(0x50);
- emit_sse_operand(dst, src);
-}
-
-
void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
Register ireg = { reg.code() };
emit_operand(ireg, adr);
@@ -3033,8 +3047,7 @@ void Assembler::RecordComment(const char* msg, bool force) {
const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
- 1 << RelocInfo::INTERNAL_REFERENCE |
- 1 << RelocInfo::CODE_AGE_SEQUENCE;
+ 1 << RelocInfo::INTERNAL_REFERENCE;
bool RelocInfo::IsCodedSpecially() {
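The restored constructor/destructor pair brings back per-isolate spare-buffer recycling: when a minimal-size assembler is destroyed, its buffer is parked on the isolate instead of freed, and the next assembler asking for the minimal size picks it up without allocating. That scheme reduced to a single cached buffer, with illustrative names and no isolate:

    #include <cstddef>
    #include <cstdint>

    static const size_t kMinimalBufferSize = 4 * 1024;
    static uint8_t* g_spare_buffer = nullptr;  // one parked minimal buffer

    // Acquire a code buffer, reusing the parked spare for the common
    // minimal size instead of allocating.
    uint8_t* AcquireBuffer(size_t size) {
      if (size == kMinimalBufferSize && g_spare_buffer != nullptr) {
        uint8_t* buffer = g_spare_buffer;
        g_spare_buffer = nullptr;
        return buffer;
      }
      return new uint8_t[size];
    }

    // Release a buffer: park a minimal-size one if the slot is free,
    // otherwise hand it back to the allocator.
    void ReleaseBuffer(uint8_t* buffer, size_t size) {
      if (size == kMinimalBufferSize && g_spare_buffer == nullptr) {
        g_spare_buffer = buffer;
      } else {
        delete[] buffer;
      }
    }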
diff --git a/deps/v8/src/x64/assembler-x64.h b/deps/v8/src/x64/assembler-x64.h
index beb695673..e8b0be9ba 100644
--- a/deps/v8/src/x64/assembler-x64.h
+++ b/deps/v8/src/x64/assembler-x64.h
@@ -556,7 +556,15 @@ class Assembler : public AssemblerBase {
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(Isolate* isolate, void* buffer, int buffer_size);
- virtual ~Assembler() { }
+ ~Assembler();
+
+ // Overrides the default provided by FLAG_debug_code.
+ void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
+
+ // Avoids using instructions that vary in size in unpredictable ways between
+ // the snapshot and the running VM. This is needed by the full compiler so
+ // that it can recompile code with debug support and fix the PC.
+ void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
@@ -1013,14 +1021,6 @@ class Assembler : public AssemblerBase {
shift(dst, imm8, 0x1);
}
- void rorl(Register dst, Immediate imm8) {
- shift_32(dst, imm8, 0x1);
- }
-
- void rorl_cl(Register dst) {
- shift_32(dst, 0x1);
- }
-
// Shifts dst:src left by cl bits, affecting only dst.
void shld(Register dst, Register src);
@@ -1363,10 +1363,8 @@ class Assembler : public AssemblerBase {
void cvtsd2siq(Register dst, XMMRegister src);
void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, const Operand& src);
void subsd(XMMRegister dst, XMMRegister src);
void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, const Operand& src);
void divsd(XMMRegister dst, XMMRegister src);
void andpd(XMMRegister dst, XMMRegister src);
@@ -1388,7 +1386,6 @@ class Assembler : public AssemblerBase {
void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
void movmskpd(Register dst, XMMRegister src);
- void movmskps(Register dst, XMMRegister src);
// The first argument is the reg field, the second argument is the r/m field.
void emit_sse_operand(XMMRegister dst, XMMRegister src);
@@ -1419,6 +1416,8 @@ class Assembler : public AssemblerBase {
void db(uint8_t data);
void dd(uint32_t data);
+ int pc_offset() const { return static_cast<int>(pc_ - buffer_); }
+
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
// Check if there is less than kGap bytes available in the buffer.
@@ -1437,10 +1436,15 @@ class Assembler : public AssemblerBase {
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512*MB;
+ static const int kMinimalBufferSize = 4*KB;
byte byte_at(int pos) { return buffer_[pos]; }
void set_byte_at(int pos, byte value) { buffer_[pos] = value; }
+ protected:
+ bool emit_debug_code() const { return emit_debug_code_; }
+ bool predictable_code_size() const { return predictable_code_size_; }
+
private:
byte* addr_at(int pos) { return buffer_ + pos; }
uint32_t long_at(int pos) {
@@ -1628,12 +1632,24 @@ class Assembler : public AssemblerBase {
friend class EnsureSpace;
friend class RegExpMacroAssemblerX64;
+ // Code buffer:
+ // The buffer into which code and relocation info are generated.
+ byte* buffer_;
+ int buffer_size_;
+ // True if the assembler owns the buffer, false if buffer is external.
+ bool own_buffer_;
+
// code generation
+ byte* pc_; // the program counter; moves forward
RelocInfoWriter reloc_info_writer;
List< Handle<Code> > code_targets_;
PositionsRecorder positions_recorder_;
+
+ bool emit_debug_code_;
+ bool predictable_code_size_;
+
friend class PositionsRecorder;
};
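set_predictable_code_size exists because x64 conditional jumps have two encodings, a 2-byte short form with an 8-bit displacement and a 6-byte near form with a 32-bit displacement, and the assembler normally picks whichever fits; pinning the choice keeps recompiled debug code the same size so patched return addresses stay valid. A sketch of that selection for a backward jcc (simplified emit buffer, real x86 opcodes):

    #include <cstdint>
    #include <vector>

    // Emit a conditional jump to an already-bound (backward) target:
    // short form (0x70|cc, rel8) when the displacement fits in 8 bits
    // and size predictability is not required, else the near form
    // (0x0F 0x80|cc, rel32). Displacements are relative to the end of
    // the emitted instruction, hence the "- size" adjustments.
    void EmitConditionalJump(std::vector<uint8_t>& code, int target_pos,
                             uint8_t cc, bool predictable_code_size) {
      const int kShortSize = 2;  // opcode + 8-bit displacement
      const int kNearSize = 6;   // 0x0F + opcode + 32-bit displacement
      int offs = target_pos - static_cast<int>(code.size());
      int short_disp = offs - kShortSize;
      if (short_disp >= -128 && short_disp <= 127 && !predictable_code_size) {
        code.push_back(0x70 | cc);
        code.push_back(static_cast<uint8_t>(short_disp & 0xFF));
      } else {
        int32_t near_disp = offs - kNearSize;
        code.push_back(0x0F);
        code.push_back(0x80 | cc);
        for (int i = 0; i < 4; ++i)  // little-endian 32-bit displacement
          code.push_back(static_cast<uint8_t>((near_disp >> (8 * i)) & 0xFF));
      }
    }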
diff --git a/deps/v8/src/x64/builtins-x64.cc b/deps/v8/src/x64/builtins-x64.cc
index ed0ec684f..9e4153a86 100644
--- a/deps/v8/src/x64/builtins-x64.cc
+++ b/deps/v8/src/x64/builtins-x64.cc
@@ -606,46 +606,6 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
}
-static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
- // For now, we are relying on the fact that make_code_young doesn't do any
- // garbage collection which allows us to save/restore the registers without
- // worrying about which of them contain pointers. We also don't build an
- // internal frame to make the code faster, since we shouldn't have to do stack
- // crawls in MakeCodeYoung. This seems a bit fragile.
-
- // Re-execute the code that was patched back to the young age when
- // the stub returns.
- __ subq(Operand(rsp, 0), Immediate(5));
- __ Pushad();
-#ifdef _WIN64
- __ movq(rcx, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#else
- __ movq(rdi, Operand(rsp, kNumSafepointRegisters * kPointerSize));
-#endif
- { // NOLINT
- FrameScope scope(masm, StackFrame::MANUAL);
- __ PrepareCallCFunction(1);
- __ CallCFunction(
- ExternalReference::get_make_code_young_function(masm->isolate()), 1);
- }
- __ Popad();
- __ ret(0);
-}
-
-
-#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C) \
-void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-} \
-void Builtins::Generate_Make##C##CodeYoungAgainOddMarking( \
- MacroAssembler* masm) { \
- GenerateMakeCodeYoungAgainCommon(masm); \
-}
-CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
-#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
-
-
static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
Deoptimizer::BailoutType type) {
// Enter an internal frame.
diff --git a/deps/v8/src/x64/code-stubs-x64.cc b/deps/v8/src/x64/code-stubs-x64.cc
index 970571840..675d404b9 100644
--- a/deps/v8/src/x64/code-stubs-x64.cc
+++ b/deps/v8/src/x64/code-stubs-x64.cc
@@ -637,10 +637,6 @@ void ToBooleanStub::GenerateTypeTransition(MacroAssembler* masm) {
class FloatingPointHelper : public AllStatic {
public:
- enum ConvertUndefined {
- CONVERT_UNDEFINED_TO_ZERO,
- BAILOUT_ON_UNDEFINED
- };
// Load the operands from rdx and rax into xmm0 and xmm1, as doubles.
// If the operands are not both numbers, jump to not_numbers.
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis.
@@ -676,8 +672,7 @@ class FloatingPointHelper : public AllStatic {
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined);
+ Label* on_not_smis);
};
@@ -1002,15 +997,16 @@ void UnaryOpStub::PrintName(StringStream* stream) {
}
-void BinaryOpStub::Initialize() {}
-
-
void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ pop(rcx); // Save return address.
__ push(rdx);
__ push(rax);
// Left and right arguments are now on top.
+ // Push this stub's key. Although the operation and the type info are
+ // encoded into the key, the encoding is opaque, so push them too.
__ Push(Smi::FromInt(MinorKey()));
+ __ Push(Smi::FromInt(op_));
+ __ Push(Smi::FromInt(operands_type_));
__ push(rcx); // Push return address.
@@ -1019,16 +1015,69 @@ void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
__ TailCallExternalReference(
ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
masm->isolate()),
- 3,
+ 5,
1);
}
-static void BinaryOpStub_GenerateSmiCode(
+void BinaryOpStub::Generate(MacroAssembler* masm) {
+ // Explicitly allow generation of nested stubs. It is safe here because
+ // generation code does not use any raw pointers.
+ AllowStubCallsScope allow_stub_calls(masm, true);
+
+ switch (operands_type_) {
+ case BinaryOpIC::UNINITIALIZED:
+ GenerateTypeTransition(masm);
+ break;
+ case BinaryOpIC::SMI:
+ GenerateSmiStub(masm);
+ break;
+ case BinaryOpIC::INT32:
+ UNREACHABLE();
+ // The int32 case is identical to the Smi case. We avoid creating this
+ // ic state on x64.
+ break;
+ case BinaryOpIC::HEAP_NUMBER:
+ GenerateHeapNumberStub(masm);
+ break;
+ case BinaryOpIC::ODDBALL:
+ GenerateOddballStub(masm);
+ break;
+ case BinaryOpIC::BOTH_STRING:
+ GenerateBothStringStub(masm);
+ break;
+ case BinaryOpIC::STRING:
+ GenerateStringStub(masm);
+ break;
+ case BinaryOpIC::GENERIC:
+ GenerateGeneric(masm);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
+void BinaryOpStub::PrintName(StringStream* stream) {
+ const char* op_name = Token::Name(op_);
+ const char* overwrite_name;
+ switch (mode_) {
+ case NO_OVERWRITE: overwrite_name = "Alloc"; break;
+ case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
+ case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
+ default: overwrite_name = "UnknownOverwrite"; break;
+ }
+ stream->Add("BinaryOpStub_%s_%s_%s",
+ op_name,
+ overwrite_name,
+ BinaryOpIC::GetName(operands_type_));
+}
+
+
+void BinaryOpStub::GenerateSmiCode(
MacroAssembler* masm,
Label* slow,
- BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
- Token::Value op) {
+ SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
// Arguments to BinaryOpStub are in rdx and rax.
const Register left = rdx;
@@ -1037,9 +1086,9 @@ static void BinaryOpStub_GenerateSmiCode(
// We only generate heapnumber answers for overflowing calculations
// for the four basic arithmetic operations and logical right shift by 0.
bool generate_inline_heapnumber_results =
- (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
- (op == Token::ADD || op == Token::SUB ||
- op == Token::MUL || op == Token::DIV || op == Token::SHR);
+ (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) &&
+ (op_ == Token::ADD || op_ == Token::SUB ||
+ op_ == Token::MUL || op_ == Token::DIV || op_ == Token::SHR);
// Smi check of both operands. If op is BIT_OR, the check is delayed
// until after the OR operation.
@@ -1047,7 +1096,7 @@ static void BinaryOpStub_GenerateSmiCode(
Label use_fp_on_smis;
Label fail;
- if (op != Token::BIT_OR) {
+ if (op_ != Token::BIT_OR) {
Comment smi_check_comment(masm, "-- Smi check arguments");
__ JumpIfNotBothSmi(left, right, &not_smis);
}
@@ -1056,7 +1105,7 @@ static void BinaryOpStub_GenerateSmiCode(
__ bind(&smi_values);
// Perform the operation.
Comment perform_smi(masm, "-- Perform smi operation");
- switch (op) {
+ switch (op_) {
case Token::ADD:
ASSERT(right.is(rax));
__ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
@@ -1128,7 +1177,7 @@ static void BinaryOpStub_GenerateSmiCode(
// operations on known smis (e.g., if the result of the operation
// overflowed the smi range).
__ bind(&use_fp_on_smis);
- if (op == Token::DIV || op == Token::MOD) {
+ if (op_ == Token::DIV || op_ == Token::MOD) {
// Restore left and right to rdx and rax.
__ movq(rdx, rcx);
__ movq(rax, rbx);
@@ -1137,12 +1186,12 @@ static void BinaryOpStub_GenerateSmiCode(
if (generate_inline_heapnumber_results) {
__ AllocateHeapNumber(rcx, rbx, slow);
Comment perform_float(masm, "-- Perform float operation on smis");
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ SmiToInteger32(left, left);
__ cvtqsi2sd(xmm0, left);
} else {
FloatingPointHelper::LoadSSE2SmiOperands(masm);
- switch (op) {
+ switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
@@ -1165,50 +1214,31 @@ static void BinaryOpStub_GenerateSmiCode(
// values that could be smi.
__ bind(&not_smis);
Comment done_comment(masm, "-- Enter non-smi code");
- FloatingPointHelper::ConvertUndefined convert_undefined =
- FloatingPointHelper::BAILOUT_ON_UNDEFINED;
- // This list must be in sync with BinaryOpPatch() behavior in ic.cc.
- if (op == Token::BIT_AND ||
- op == Token::BIT_OR ||
- op == Token::BIT_XOR ||
- op == Token::SAR ||
- op == Token::SHL ||
- op == Token::SHR) {
- convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO;
- }
FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx,
- &smi_values, &fail, convert_undefined);
+ &smi_values, &fail);
__ jmp(&smi_values);
__ bind(&fail);
}
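GenerateSmiCode is the usual tagged-integer fast path: operate on smis directly, and when the arithmetic leaves the smi range, either redo it in floating point and box the result (ALLOW_HEAPNUMBER_RESULTS) or bail out to the slow label. A scalar sketch of that shape, assuming a GCC/Clang-style overflow builtin:

    #include <cstdint>

    // Sketch: add two 32-bit "smi" payloads; if the sum overflows,
    // produce a heap-number-style double instead (the use_fp_on_smis path).
    struct Value { bool is_smi; int32_t smi; double number; };

    Value Add(int32_t left, int32_t right) {
      int32_t sum;
      if (!__builtin_add_overflow(left, right, &sum))
        return {true, sum, 0.0};  // stayed in smi range
      // Overflowed: redo the operation in floating point and box it.
      return {false, 0, static_cast<double>(left) + static_cast<double>(right)};
    }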
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode);
-
-
-static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
- Label* allocation_failure,
- Label* non_numeric_failure,
- Token::Value op,
- OverwriteMode mode) {
- switch (op) {
+void BinaryOpStub::GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure) {
+ switch (op_) {
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV: {
FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure);
- switch (op) {
+ switch (op_) {
case Token::ADD: __ addsd(xmm0, xmm1); break;
case Token::SUB: __ subsd(xmm0, xmm1); break;
case Token::MUL: __ mulsd(xmm0, xmm1); break;
case Token::DIV: __ divsd(xmm0, xmm1); break;
default: UNREACHABLE();
}
- BinaryOpStub_GenerateHeapResultAllocation(
- masm, allocation_failure, mode);
+ GenerateHeapResultAllocation(masm, allocation_failure);
__ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
__ ret(0);
break;
@@ -1229,7 +1259,7 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
heap_number_map);
- switch (op) {
+ switch (op_) {
case Token::BIT_OR: __ orl(rax, rcx); break;
case Token::BIT_AND: __ andl(rax, rcx); break;
case Token::BIT_XOR: __ xorl(rax, rcx); break;
@@ -1253,7 +1283,7 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
// Logical shift right can produce an unsigned int32 that is not
// an int32, and so is not in the smi range. Allocate a heap number
// in that case.
- if (op == Token::SHR) {
+ if (op_ == Token::SHR) {
__ bind(&non_smi_shr_result);
Label allocation_failed;
__ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
@@ -1290,12 +1320,12 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
// No fall-through from this generated code.
if (FLAG_debug_code) {
__ Abort("Unexpected fall-through in "
- "BinaryStub_GenerateFloatingPointCode.");
+ "BinaryStub::GenerateFloatingPointCode.");
}
}
-void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+void BinaryOpStub::GenerateStringAddCode(MacroAssembler* masm) {
ASSERT(op_ == Token::ADD);
Label left_not_string, call_runtime;
@@ -1326,17 +1356,58 @@ void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
}
+void BinaryOpStub::GenerateCallRuntimeCode(MacroAssembler* masm) {
+ GenerateRegisterArgsPush(masm);
+ switch (op_) {
+ case Token::ADD:
+ __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
+ break;
+ case Token::SUB:
+ __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
+ break;
+ case Token::MUL:
+ __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
+ break;
+ case Token::DIV:
+ __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
+ break;
+ case Token::MOD:
+ __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
+ break;
+ case Token::BIT_OR:
+ __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
+ break;
+ case Token::BIT_AND:
+ __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
+ break;
+ case Token::BIT_XOR:
+ __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
+ break;
+ case Token::SAR:
+ __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
+ break;
+ case Token::SHL:
+ __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
+ break;
+ case Token::SHR:
+ __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
+ break;
+ default:
+ UNREACHABLE();
+ }
+}
+
+
void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
Label call_runtime;
if (result_type_ == BinaryOpIC::UNINITIALIZED ||
result_type_ == BinaryOpIC::SMI) {
// Only allow smi results.
- BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
} else {
// Allow heap number result and don't make a transition if a heap number
// cannot be allocated.
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
}
// Code falls through if the result is not returned as either a smi or heap
@@ -1345,22 +1416,24 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
if (call_runtime.is_linked()) {
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ GenerateCallRuntimeCode(masm);
}
}
-void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
- // The int32 case is identical to the Smi case. We avoid creating this
- // ic state on x64.
- UNREACHABLE();
+void BinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
+ ASSERT(operands_type_ == BinaryOpIC::STRING);
+ ASSERT(op_ == Token::ADD);
+ GenerateStringAddCode(masm);
+  // Try to add arguments as strings; otherwise, transition to the generic
+ // BinaryOpIC type.
+ GenerateTypeTransition(masm);
}
void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
Label call_runtime;
- ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+ ASSERT(operands_type_ == BinaryOpIC::BOTH_STRING);
ASSERT(op_ == Token::ADD);
// If both arguments are strings, call the string add stub.
// Otherwise, do a transition.
@@ -1394,7 +1467,7 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
if (op_ == Token::ADD) {
// Handle string addition here, because it is the only operation
// that does not do a ToNumber conversion on the operands.
- GenerateAddStrings(masm);
+ GenerateStringAddCode(masm);
}
// Convert oddball arguments to numbers.
@@ -1421,79 +1494,39 @@ void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
}
-static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm,
- Register input,
- Label* fail) {
- Label ok;
- __ JumpIfSmi(input, &ok, Label::kNear);
- Register heap_number_map = r8;
- Register scratch1 = r9;
- Register scratch2 = r10;
- // HeapNumbers containing 32bit integer values are also allowed.
- __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
- __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal, fail);
- __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset));
- // Convert, convert back, and compare the two doubles' bits.
- __ cvttsd2siq(scratch2, xmm0);
- __ cvtlsi2sd(xmm1, scratch2);
- __ movq(scratch1, xmm0);
- __ movq(scratch2, xmm1);
- __ cmpq(scratch1, scratch2);
- __ j(not_equal, fail);
- __ bind(&ok);
-}
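The helper deleted above admits a HeapNumber operand only when its double payload is an exact 32-bit integer, tested by truncating to an integer, converting back, and comparing raw bit patterns. The same trick in portable C++ (a sketch; the stub does this in SSE registers and relies on cvttsd2si's out-of-range sentinel, which also fails the comparison):

    #include <cstdint>
    #include <cstring>

    // True when d survives a double -> int32 -> double round trip with
    // identical bits, i.e. d holds an exact 32-bit integer value.
    bool IsInt32HeapNumber(double d) {
      // Assumes d is finite and in int32 range; out-of-range casts are UB
      // in C++, whereas the stub gets a sentinel that fails the compare.
      int32_t truncated = static_cast<int32_t>(d);   // cvttsd2si
      double roundtrip = static_cast<double>(truncated);
      uint64_t a, b;
      std::memcpy(&a, &d, sizeof a);
      std::memcpy(&b, &roundtrip, sizeof b);
      return a == b;  // also rejects -0.0, whose bits differ from 0.0
    }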
-
-
void BinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
Label gc_required, not_number;
-
- // It could be that only SMIs have been seen at either the left
- // or the right operand. For precise type feedback, patch the IC
- // again if this changes.
- if (left_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rdx, &not_number);
- }
- if (right_type_ == BinaryOpIC::SMI) {
- BinaryOpStub_CheckSmiInput(masm, rax, &not_number);
- }
-
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &gc_required, &not_number, op_, mode_);
+ GenerateFloatingPointCode(masm, &gc_required, &not_number);
__ bind(&not_number);
GenerateTypeTransition(masm);
__ bind(&gc_required);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ GenerateCallRuntimeCode(masm);
}
void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
Label call_runtime, call_string_add_or_runtime;
- BinaryOpStub_GenerateSmiCode(
- masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
+ GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
- BinaryOpStub_GenerateFloatingPointCode(
- masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
+ GenerateFloatingPointCode(masm, &call_runtime, &call_string_add_or_runtime);
__ bind(&call_string_add_or_runtime);
if (op_ == Token::ADD) {
- GenerateAddStrings(masm);
+ GenerateStringAddCode(masm);
}
__ bind(&call_runtime);
- GenerateRegisterArgsPush(masm);
- GenerateCallRuntime(masm);
+ GenerateCallRuntimeCode(masm);
}
-static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
- Label* alloc_failure,
- OverwriteMode mode) {
+void BinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
+ Label* alloc_failure) {
Label skip_allocation;
+ OverwriteMode mode = mode_;
switch (mode) {
case OVERWRITE_LEFT: {
// If the argument in rdx is already an object, we skip the
@@ -1989,21 +2022,17 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
Register scratch2,
Register scratch3,
Label* on_success,
- Label* on_not_smis,
- ConvertUndefined convert_undefined) {
+ Label* on_not_smis) {
Register heap_number_map = scratch3;
Register smi_result = scratch1;
- Label done, maybe_undefined_first, maybe_undefined_second, first_done;
+ Label done;
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
Label first_smi;
__ JumpIfSmi(first, &first_smi, Label::kNear);
__ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_first
- : on_not_smis);
+ __ j(not_equal, on_not_smis);
// Convert HeapNumber to smi if possible.
__ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2016,15 +2045,11 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
__ j(not_equal, on_not_smis);
__ Integer32ToSmi(first, smi_result);
- __ bind(&first_done);
__ JumpIfSmi(second, (on_success != NULL) ? on_success : &done);
__ bind(&first_smi);
__ AssertNotSmi(second);
__ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map);
- __ j(not_equal,
- (convert_undefined == CONVERT_UNDEFINED_TO_ZERO)
- ? &maybe_undefined_second
- : on_not_smis);
+ __ j(not_equal, on_not_smis);
// Convert second to smi, if possible.
__ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset));
__ movq(scratch2, xmm0);
@@ -2037,25 +2062,8 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
if (on_success != NULL) {
__ jmp(on_success);
} else {
- __ jmp(&done);
- }
-
- __ bind(&maybe_undefined_first);
- __ CompareRoot(first, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(first, first);
- __ jmp(&first_done);
-
- __ bind(&maybe_undefined_second);
- __ CompareRoot(second, Heap::kUndefinedValueRootIndex);
- __ j(not_equal, on_not_smis);
- __ xor_(second, second);
- if (on_success != NULL) {
- __ jmp(on_success);
+ __ bind(&done);
}
- // Else: fall through.
-
- __ bind(&done);
}
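The removed CONVERT_UNDEFINED_TO_ZERO paths were a 3.15 refinement letting the bitwise operators fold undefined to 0 inline (matching ToInt32 semantics); the restored code always falls out to on_not_smis and lets the runtime handle non-numbers. The semantics being special-cased, as a sketch:

    #include <cmath>
    #include <cstdint>

    enum Kind { NUMBER, UNDEFINED, OTHER };
    struct Val { Kind kind; double num; };

    // ToInt32 as used by the bitwise operators: undefined (and NaN)
    // become 0. Sketch only; full ToInt32 also handles infinities and
    // the modulo-2^32 wrap of out-of-range doubles.
    bool ToInt32ForBitwise(Val v, int32_t* out) {
      if (v.kind == UNDEFINED) { *out = 0; return true; }  // the removed fast path
      if (v.kind == NUMBER) {
        *out = std::isnan(v.num) ? 0 : static_cast<int32_t>(v.num);
        return true;
      }
      return false;  // bail out to the runtime, as the restored code always does
    }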
@@ -2221,7 +2229,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// F2XM1 calculates 2^st(0) - 1 for -1 < st(0) < 1
__ f2xm1(); // 2^(X-rnd(X)) - 1, rnd(X)
__ fld1(); // 1, 2^(X-rnd(X)) - 1, rnd(X)
- __ faddp(1); // 2^(X-rnd(X)), rnd(X)
+ __ faddp(1); // 1, 2^(X-rnd(X)), rnd(X)
// FSCALE calculates st(0) * 2^st(1)
__ fscale(); // 2^X, rnd(X)
__ fstp(1);
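The x87 sequence computes 2^X by splitting X into rnd(X) plus a fraction inside F2XM1's (-1, 1) domain: F2XM1 yields 2^frac - 1, the fld1/faddp pair restores 2^frac, and FSCALE multiplies by 2^rnd(X). The same decomposition in portable C++ (a sketch, assuming the integer part fits in an int):

    #include <cmath>

    // exp2 via the identity 2^x = 2^(x - rnd(x)) * 2^rnd(x),
    // mirroring f2xm1 (+1) followed by fscale.
    double Exp2(double x) {
      double r = std::nearbyint(x);              // rnd(X), x87 FRNDINT
      double frac = x - r;                       // in [-0.5, 0.5], valid for F2XM1
      double pow_frac = std::exp2(frac);         // the f2xm1/fld1/faddp steps
      return std::ldexp(pow_frac, (int)r);       // fscale: scale by 2^rnd(X)
    }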
@@ -2596,7 +2604,7 @@ void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) {
__ bind(&runtime);
__ Integer32ToSmi(rcx, rcx);
__ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count.
- __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1);
+ __ TailCallRuntime(Runtime::kNewStrictArgumentsFast, 3, 1);
}
@@ -3014,8 +3022,8 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// r15: original subject string
__ testb(rcx, rcx); // Last use of rcx as encoding of subject string.
__ j(zero, &setup_two_byte, Label::kNear);
- __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqOneByteString::kHeaderSize));
- __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqOneByteString::kHeaderSize));
+ __ lea(arg4, FieldOperand(rdi, r14, times_1, SeqAsciiString::kHeaderSize));
+ __ lea(arg3, FieldOperand(rdi, rbx, times_1, SeqAsciiString::kHeaderSize));
__ jmp(&setup_rest, Label::kNear);
__ bind(&setup_two_byte);
__ lea(arg4, FieldOperand(rdi, r14, times_2, SeqTwoByteString::kHeaderSize));
@@ -3155,7 +3163,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
}
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
STATIC_ASSERT(kTwoByteStringTag == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
@@ -3370,59 +3378,30 @@ static int NegativeComparisonResult(Condition cc) {
}
-static void CheckInputType(MacroAssembler* masm,
- Register input,
- CompareIC::State expected,
- Label* fail) {
- Label ok;
- if (expected == CompareIC::SMI) {
- __ JumpIfNotSmi(input, fail);
- } else if (expected == CompareIC::HEAP_NUMBER) {
- __ JumpIfSmi(input, &ok);
- __ CompareMap(input, masm->isolate()->factory()->heap_number_map(), NULL);
- __ j(not_equal, fail);
- }
- // We could be strict about symbol/string here, but as long as
- // hydrogen doesn't care, the stub doesn't have to care either.
- __ bind(&ok);
-}
-
+void CompareStub::Generate(MacroAssembler* masm) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
-static void BranchIfNonSymbol(MacroAssembler* masm,
- Label* label,
- Register object,
- Register scratch) {
- __ JumpIfSmi(object, label);
- __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
- __ movzxbq(scratch,
- FieldOperand(scratch, Map::kInstanceTypeOffset));
- // Ensure that no non-strings have the symbol bit set.
- STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
- STATIC_ASSERT(kSymbolTag != 0);
- __ testb(scratch, Immediate(kIsSymbolMask));
- __ j(zero, label);
-}
-
-
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
Label check_unequal_objects, done;
- Condition cc = GetCondition();
Factory* factory = masm->isolate()->factory();
- Label miss;
- CheckInputType(masm, rdx, left_, &miss);
- CheckInputType(masm, rax, right_, &miss);
-
- // Compare two smis.
- Label non_smi, smi_done;
- __ JumpIfNotBothSmi(rax, rdx, &non_smi);
- __ subq(rdx, rax);
- __ j(no_overflow, &smi_done);
- __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
- __ bind(&smi_done);
- __ movq(rax, rdx);
- __ ret(0);
- __ bind(&non_smi);
+ // Compare two smis if required.
+ if (include_smi_compare_) {
+ Label non_smi, smi_done;
+ __ JumpIfNotBothSmi(rax, rdx, &non_smi);
+ __ subq(rdx, rax);
+ __ j(no_overflow, &smi_done);
+ __ not_(rdx); // Correct sign in case of overflow. rdx cannot be 0 here.
+ __ bind(&smi_done);
+ __ movq(rax, rdx);
+ __ ret(0);
+ __ bind(&non_smi);
+ } else if (FLAG_debug_code) {
+ Label ok;
+ __ JumpIfNotSmi(rdx, &ok);
+ __ JumpIfNotSmi(rax, &ok);
+ __ Abort("CompareStub: smi operands");
+ __ bind(&ok);
+ }
// The compare stub returns a positive, negative, or zero 64-bit integer
// value in rax, corresponding to result of comparing the two inputs.
@@ -3435,58 +3414,66 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
__ cmpq(rax, rdx);
__ j(not_equal, &not_identical, Label::kNear);
- if (cc != equal) {
+ if (cc_ != equal) {
// Check for undefined. undefined OP undefined is false even though
// undefined == undefined.
Label check_for_nan;
__ CompareRoot(rdx, Heap::kUndefinedValueRootIndex);
__ j(not_equal, &check_for_nan, Label::kNear);
- __ Set(rax, NegativeComparisonResult(cc));
+ __ Set(rax, NegativeComparisonResult(cc_));
__ ret(0);
__ bind(&check_for_nan);
}
// Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
// so we do the second best thing - test it ourselves.
- Label heap_number;
- // If it's not a heap number, then return equal for (in)equality operator.
- __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
- factory->heap_number_map());
- __ j(equal, &heap_number, Label::kNear);
- if (cc != equal) {
- // Call runtime on identical objects. Otherwise return equal.
- __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
- __ j(above_equal, &not_identical, Label::kNear);
- }
- __ Set(rax, EQUAL);
- __ ret(0);
+ // Note: if cc_ != equal, never_nan_nan_ is not used.
+ // We cannot set rax to EQUAL until just before return because
+ // rax must be unchanged on jump to not_identical.
+ if (never_nan_nan_ && (cc_ == equal)) {
+ __ Set(rax, EQUAL);
+ __ ret(0);
+ } else {
+ Label heap_number;
+ // If it's not a heap number, then return equal for (in)equality operator.
+ __ Cmp(FieldOperand(rdx, HeapObject::kMapOffset),
+ factory->heap_number_map());
+ __ j(equal, &heap_number, Label::kNear);
+ if (cc_ != equal) {
+ // Call runtime on identical objects. Otherwise return equal.
+ __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rcx);
+ __ j(above_equal, &not_identical, Label::kNear);
+ }
+ __ Set(rax, EQUAL);
+ __ ret(0);
- __ bind(&heap_number);
- // It is a heap number, so return equal if it's not NaN.
- // For NaN, return 1 for every condition except greater and
- // greater-equal. Return -1 for them, so the comparison yields
- // false for all conditions except not-equal.
- __ Set(rax, EQUAL);
- __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ ucomisd(xmm0, xmm0);
- __ setcc(parity_even, rax);
- // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
- if (cc == greater_equal || cc == greater) {
- __ neg(rax);
+ __ bind(&heap_number);
+ // It is a heap number, so return equal if it's not NaN.
+ // For NaN, return 1 for every condition except greater and
+ // greater-equal. Return -1 for them, so the comparison yields
+ // false for all conditions except not-equal.
+ __ Set(rax, EQUAL);
+ __ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
+ __ ucomisd(xmm0, xmm0);
+ __ setcc(parity_even, rax);
+ // rax is 0 for equal non-NaN heapnumbers, 1 for NaNs.
+ if (cc_ == greater_equal || cc_ == greater) {
+ __ neg(rax);
+ }
+ __ ret(0);
}
- __ ret(0);
__ bind(&not_identical);
}
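For identical operands the stub can usually answer without comparing at all; the one exception is a NaN heap number, which must compare unequal to itself, detected above through ucomisd setting the parity flag. The decision logic in scalar form (a sketch mirroring the comments in the assembly):

    #include <cmath>

    enum Cond { EQ, NE, LT, LE, GT, GE };

    // Result convention used by the stub: negative, zero, or positive.
    // For NaN, return 1 for every condition except greater and
    // greater-equal (which get -1), so each ordered predicate is false.
    int CompareIdentical(double v, Cond cc) {
      if (!std::isnan(v)) return 0;  // EQUAL
      return (cc == GT || cc == GE) ? -1 : 1;
    }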
- if (cc == equal) { // Both strict and non-strict.
+ if (cc_ == equal) { // Both strict and non-strict.
Label slow; // Fallthrough label.
// If we're doing a strict equality comparison, we don't have to do
// type conversion, so we generate code to do fast comparison for objects
// and oddballs. Non-smi numbers and strings still go through the usual
// slow-case code.
- if (strict()) {
+ if (strict_) {
// If either is a Smi (we know that not both are), then they can only
// be equal if the other is a HeapNumber. If so, use the slow case.
{
@@ -3538,38 +3525,40 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
}
// Generate the number comparison code.
- Label non_number_comparison;
- Label unordered;
- FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
- __ xorl(rax, rax);
- __ xorl(rcx, rcx);
- __ ucomisd(xmm0, xmm1);
+ if (include_number_compare_) {
+ Label non_number_comparison;
+ Label unordered;
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, &non_number_comparison);
+ __ xorl(rax, rax);
+ __ xorl(rcx, rcx);
+ __ ucomisd(xmm0, xmm1);
- // Don't base result on EFLAGS when a NaN is involved.
- __ j(parity_even, &unordered, Label::kNear);
- // Return a result of -1, 0, or 1, based on EFLAGS.
- __ setcc(above, rax);
- __ setcc(below, rcx);
- __ subq(rax, rcx);
- __ ret(0);
+ // Don't base result on EFLAGS when a NaN is involved.
+ __ j(parity_even, &unordered, Label::kNear);
+ // Return a result of -1, 0, or 1, based on EFLAGS.
+ __ setcc(above, rax);
+ __ setcc(below, rcx);
+ __ subq(rax, rcx);
+ __ ret(0);
- // If one of the numbers was NaN, then the result is always false.
- // The cc is never not-equal.
- __ bind(&unordered);
- ASSERT(cc != not_equal);
- if (cc == less || cc == less_equal) {
- __ Set(rax, 1);
- } else {
- __ Set(rax, -1);
- }
- __ ret(0);
+ // If one of the numbers was NaN, then the result is always false.
+ // The cc is never not-equal.
+ __ bind(&unordered);
+ ASSERT(cc_ != not_equal);
+ if (cc_ == less || cc_ == less_equal) {
+ __ Set(rax, 1);
+ } else {
+ __ Set(rax, -1);
+ }
+ __ ret(0);
- // The number comparison code did not provide a valid result.
- __ bind(&non_number_comparison);
+ // The number comparison code did not provide a valid result.
+ __ bind(&non_number_comparison);
+ }
// Fast negative check for symbol-to-symbol equality.
Label check_for_strings;
- if (cc == equal) {
+ if (cc_ == equal) {
BranchIfNonSymbol(masm, &check_for_strings, rax, kScratchRegister);
BranchIfNonSymbol(masm, &check_for_strings, rdx, kScratchRegister);
@@ -3585,7 +3574,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
rdx, rax, rcx, rbx, &check_unequal_objects);
// Inline comparison of ASCII strings.
- if (cc == equal) {
+ if (cc_ == equal) {
StringCompareStub::GenerateFlatAsciiStringEquals(masm,
rdx,
rax,
@@ -3606,7 +3595,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
#endif
__ bind(&check_unequal_objects);
- if (cc == equal && !strict()) {
+ if (cc_ == equal && !strict_) {
// Not strict equality. Objects are unequal if
// they are both JSObjects and not undetectable,
// and their pointers are different.
@@ -3646,11 +3635,11 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Figure out which native to call and setup the arguments.
Builtins::JavaScript builtin;
- if (cc == equal) {
- builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+ if (cc_ == equal) {
+ builtin = strict_ ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
} else {
builtin = Builtins::COMPARE;
- __ Push(Smi::FromInt(NegativeComparisonResult(cc)));
+ __ Push(Smi::FromInt(NegativeComparisonResult(cc_)));
}
// Restore return address on the stack.
@@ -3659,9 +3648,22 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
// Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
// tagged as a small integer.
__ InvokeBuiltin(builtin, JUMP_FUNCTION);
+}
- __ bind(&miss);
- GenerateMiss(masm);
+
+void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
+ Label* label,
+ Register object,
+ Register scratch) {
+ __ JumpIfSmi(object, label);
+ __ movq(scratch, FieldOperand(object, HeapObject::kMapOffset));
+ __ movzxbq(scratch,
+ FieldOperand(scratch, Map::kInstanceTypeOffset));
+ // Ensure that no non-strings have the symbol bit set.
+ STATIC_ASSERT(LAST_TYPE < kNotStringTag + kIsSymbolMask);
+ STATIC_ASSERT(kSymbolTag != 0);
+ __ testb(scratch, Immediate(kIsSymbolMask));
+ __ j(zero, label);
}
@@ -4419,6 +4421,44 @@ Register InstanceofStub::left() { return no_reg; }
Register InstanceofStub::right() { return no_reg; }
+int CompareStub::MinorKey() {
+  // Encode the stub parameters in a unique 16 bit value. To avoid duplicate
+  // stubs, the never-NaN-NaN flag is only used when the condition is equal.
+ // condition is equals.
+ ASSERT(static_cast<unsigned>(cc_) < (1 << 12));
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ return ConditionField::encode(static_cast<unsigned>(cc_))
+ | RegisterField::encode(false) // lhs_ and rhs_ are not used
+ | StrictField::encode(strict_)
+ | NeverNanNanField::encode(cc_ == equal ? never_nan_nan_ : false)
+ | IncludeNumberCompareField::encode(include_number_compare_)
+ | IncludeSmiCompareField::encode(include_smi_compare_);
+}
+
+
+// Unfortunately you have to run without snapshots to see most of these
+// names in the profile since most compare stubs end up in the snapshot.
+void CompareStub::PrintName(StringStream* stream) {
+ ASSERT(lhs_.is(no_reg) && rhs_.is(no_reg));
+ const char* cc_name;
+ switch (cc_) {
+ case less: cc_name = "LT"; break;
+ case greater: cc_name = "GT"; break;
+ case less_equal: cc_name = "LE"; break;
+ case greater_equal: cc_name = "GE"; break;
+ case equal: cc_name = "EQ"; break;
+ case not_equal: cc_name = "NE"; break;
+ default: cc_name = "UnknownCondition"; break;
+ }
+ bool is_equality = cc_ == equal || cc_ == not_equal;
+ stream->Add("CompareStub_%s", cc_name);
+ if (strict_ && is_equality) stream->Add("_STRICT");
+ if (never_nan_nan_ && is_equality) stream->Add("_NO_NAN");
+ if (!include_number_compare_) stream->Add("_NO_NUMBER");
+ if (!include_smi_compare_) stream->Add("_NO_SMI");
+}
+
+
// -------------------------------------------------------------------------
// StringCharCodeAtGenerator
@@ -4656,8 +4696,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
&call_runtime);
// Get the two characters forming the sub string.
- __ movzxbq(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
// Try to lookup two character string in symbol table. If it is not found
// just allocate a new one.
@@ -4673,11 +4713,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// rbx - first byte: first character
// rbx - second byte: *maybe* second character
// Make sure that the second byte of rbx contains the second character.
- __ movzxbq(rcx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
+ __ movzxbq(rcx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
__ shll(rcx, Immediate(kBitsPerByte));
__ orl(rbx, rcx);
// Write both characters to the new string.
- __ movw(FieldOperand(rax, SeqOneByteString::kHeaderSize), rbx);
+ __ movw(FieldOperand(rax, SeqAsciiString::kHeaderSize), rbx);
__ IncrementCounter(counters->string_add_native(), 1);
__ ret(2 * kPointerSize);
@@ -4700,7 +4740,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
Label non_ascii, allocated, ascii_data;
__ movl(rcx, r8);
__ and_(rcx, r9);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testl(rcx, Immediate(kStringEncodingMask));
__ j(zero, &non_ascii);
@@ -4725,6 +4765,11 @@ void StringAddStub::Generate(MacroAssembler* masm) {
// r9: second instance type.
__ testb(rcx, Immediate(kAsciiDataHintMask));
__ j(not_zero, &ascii_data);
+ __ xor_(r8, r9);
+ STATIC_ASSERT(kAsciiStringTag != 0 && kAsciiDataHintTag != 0);
+ __ andb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ cmpb(r8, Immediate(kAsciiStringTag | kAsciiDataHintTag));
+ __ j(equal, &ascii_data);
// Allocate a two byte cons string.
__ AllocateTwoByteConsString(rcx, rdi, no_reg, &call_runtime);
__ jmp(&allocated);
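The five added instructions are an XOR trick: after xor r8, r9, a bit of the mask survives the and only where the two instance types disagree, so the branch is taken exactly when the strings differ in both the encoding bit and the ASCII-data-hint bit, i.e. one side is ASCII and the other is a two-byte string hinted to hold ASCII data. In isolation (hypothetical bit values; only their being distinct single bits matters):

    #include <cstdint>

    constexpr uint8_t kAsciiStringTag   = 1 << 2;  // illustrative value
    constexpr uint8_t kAsciiDataHintTag = 1 << 3;  // illustrative value

    // True iff the two type words differ in *both* flag bits: one side is
    // ASCII-encoded, the other carries the ASCII-data hint instead.
    bool DiffersInBothFlags(uint8_t t1, uint8_t t2) {
      constexpr uint8_t mask = kAsciiStringTag | kAsciiDataHintTag;
      return ((t1 ^ t2) & mask) == mask;
    }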
@@ -4754,8 +4799,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rcx, FieldOperand(rax, ExternalString::kResourceDataOffset));
__ jmp(&first_prepared, Label::kNear);
__ bind(&first_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rcx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rcx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
__ bind(&first_prepared);
// Check whether both strings have same encoding.
@@ -4775,8 +4820,8 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ movq(rdx, FieldOperand(rdx, ExternalString::kResourceDataOffset));
__ jmp(&second_prepared, Label::kNear);
__ bind(&second_is_sequential);
- STATIC_ASSERT(SeqOneByteString::kHeaderSize == SeqTwoByteString::kHeaderSize);
- __ lea(rdx, FieldOperand(rdx, SeqOneByteString::kHeaderSize));
+ STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize);
+ __ lea(rdx, FieldOperand(rdx, SeqAsciiString::kHeaderSize));
__ bind(&second_prepared);
Label non_ascii_string_add_flat_result;
@@ -4792,7 +4837,7 @@ void StringAddStub::Generate(MacroAssembler* masm) {
__ AllocateAsciiString(rax, rbx, rdi, r8, r9, &call_runtime);
// rax: result string
// Locate first character of result.
- __ lea(rbx, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ lea(rbx, FieldOperand(rax, SeqAsciiString::kHeaderSize));
// rcx: first char of first string
// rbx: first character of result
// r14: length of first string
@@ -5065,7 +5110,7 @@ void StringHelper::GenerateTwoCharacterSymbolTableProbe(MacroAssembler* masm,
temp, temp, &next_probe[i]);
// Check if the two characters match.
- __ movl(temp, FieldOperand(candidate, SeqOneByteString::kHeaderSize));
+ __ movl(temp, FieldOperand(candidate, SeqAsciiString::kHeaderSize));
__ andl(temp, Immediate(0x0000ffff));
__ cmpl(chars, temp);
__ j(equal, &found_in_symbol_table);
@@ -5243,7 +5288,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
// string's encoding is wrong because we always have to recheck encoding of
// the newly created string's parent anyways due to externalized strings.
Label two_byte_slice, set_slice_header;
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_slice, Label::kNear);
@@ -5283,11 +5328,11 @@ void SubStringStub::Generate(MacroAssembler* masm) {
__ j(not_zero, &runtime);
__ movq(rdi, FieldOperand(rdi, ExternalString::kResourceDataOffset));
// Move the pointer so that offset-wise, it looks like a sequential string.
- STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+ STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqAsciiString::kHeaderSize);
__ subq(rdi, Immediate(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
__ bind(&sequential_string);
- STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+ STATIC_ASSERT((kAsciiStringTag & kStringEncodingMask) != 0);
__ testb(rbx, Immediate(kStringEncodingMask));
__ j(zero, &two_byte_sequential);
@@ -5300,10 +5345,10 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_1);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
- __ lea(rdi, FieldOperand(rax, SeqOneByteString::kHeaderSize));
+ __ lea(rdi, FieldOperand(rax, SeqAsciiString::kHeaderSize));
// rax: result string
// rcx: result length
@@ -5325,7 +5370,7 @@ void SubStringStub::Generate(MacroAssembler* masm) {
{ // Locate character of sub string start.
SmiIndex smi_as_index = masm->SmiToIndex(rdx, rdx, times_2);
__ lea(rsi, Operand(rdi, smi_as_index.reg, smi_as_index.scale,
- SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ SeqAsciiString::kHeaderSize - kHeapObjectTag));
}
// Locate first character of result.
__ lea(rdi, FieldOperand(rax, SeqTwoByteString::kHeaderSize));
@@ -5465,9 +5510,9 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop(
// doesn't need an additional compare.
__ SmiToInteger32(length, length);
__ lea(left,
- FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize));
+ FieldOperand(left, length, times_1, SeqAsciiString::kHeaderSize));
__ lea(right,
- FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize));
+ FieldOperand(right, length, times_1, SeqAsciiString::kHeaderSize));
__ neg(length);
Register index = length; // index = -length;
@@ -5523,7 +5568,7 @@ void StringCompareStub::Generate(MacroAssembler* masm) {
void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SMI);
+ ASSERT(state_ == CompareIC::SMIS);
Label miss;
__ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
@@ -5535,7 +5580,7 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
__ subq(rdx, rax);
__ j(no_overflow, &done, Label::kNear);
// Correct sign of result in case of overflow.
- __ not_(rdx);
+ __ SmiNot(rdx, rdx);
__ bind(&done);
__ movq(rax, rdx);
}
@@ -5547,41 +5592,23 @@ void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::HEAP_NUMBER);
+ ASSERT(state_ == CompareIC::HEAP_NUMBERS);
Label generic_stub;
Label unordered, maybe_undefined1, maybe_undefined2;
Label miss;
+ Condition either_smi = masm->CheckEitherSmi(rax, rdx);
+ __ j(either_smi, &generic_stub, Label::kNear);
- if (left_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rdx, &miss);
- }
- if (right_ == CompareIC::SMI) {
- __ JumpIfNotSmi(rax, &miss);
- }
-
- // Load left and right operand.
- Label done, left, left_smi, right_smi;
- __ JumpIfSmi(rax, &right_smi, Label::kNear);
- __ CompareMap(rax, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CmpObjectType(rax, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined1, Label::kNear);
- __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ jmp(&left, Label::kNear);
- __ bind(&right_smi);
- __ SmiToInteger32(rcx, rax); // Can't clobber rax yet.
- __ cvtlsi2sd(xmm1, rcx);
-
- __ bind(&left);
- __ JumpIfSmi(rdx, &left_smi, Label::kNear);
- __ CompareMap(rdx, masm->isolate()->factory()->heap_number_map(), NULL);
+ __ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
+
+  // Load left and right operands.
__ movsd(xmm0, FieldOperand(rdx, HeapNumber::kValueOffset));
- __ jmp(&done);
- __ bind(&left_smi);
- __ SmiToInteger32(rcx, rdx); // Can't clobber rdx yet.
- __ cvtlsi2sd(xmm0, rcx);
+ __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset));
- __ bind(&done);
// Compare operands
__ ucomisd(xmm0, xmm1);
@@ -5597,16 +5624,14 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
__ ret(0);
__ bind(&unordered);
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
__ bind(&generic_stub);
- ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC,
- CompareIC::GENERIC);
__ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
__ bind(&maybe_undefined1);
if (Token::IsOrderedRelationalCompareOp(op_)) {
__ Cmp(rax, masm->isolate()->factory()->undefined_value());
__ j(not_equal, &miss);
- __ JumpIfSmi(rdx, &unordered);
__ CmpObjectType(rdx, HEAP_NUMBER_TYPE, rcx);
__ j(not_equal, &maybe_undefined2, Label::kNear);
__ jmp(&unordered);
@@ -5624,7 +5649,7 @@ void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) {
void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::SYMBOL);
+ ASSERT(state_ == CompareIC::SYMBOLS);
ASSERT(GetCondition() == equal);
// Registers containing left and right operands respectively.
@@ -5667,7 +5692,7 @@ void ICCompareStub::GenerateSymbols(MacroAssembler* masm) {
void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::STRING);
+ ASSERT(state_ == CompareIC::STRINGS);
Label miss;
bool equality = Token::IsEqualityOp(op_);
@@ -5753,7 +5778,7 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
- ASSERT(state_ == CompareIC::OBJECT);
+ ASSERT(state_ == CompareIC::OBJECTS);
Label miss;
Condition either_smi = masm->CheckEitherSmi(rdx, rax);
__ j(either_smi, &miss, Label::kNear);
@@ -6214,8 +6239,13 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm, Mode mode) {
ASSERT(!address.is(arg1));
__ Move(address, regs_.address());
__ Move(arg1, regs_.object());
- // TODO(gc) Can we just set address arg2 in the beginning?
- __ Move(arg2, address);
+ if (mode == INCREMENTAL_COMPACTION) {
+ // TODO(gc) Can we just set address arg2 in the beginning?
+ __ Move(arg2, address);
+ } else {
+ ASSERT(mode == INCREMENTAL);
+ __ movq(arg2, Operand(address, 0));
+ }
__ LoadAddress(arg3, ExternalReference::isolate_address());
int argument_count = 3;
diff --git a/deps/v8/src/x64/code-stubs-x64.h b/deps/v8/src/x64/code-stubs-x64.h
index ab8ea76c8..6a1a18f83 100644
--- a/deps/v8/src/x64/code-stubs-x64.h
+++ b/deps/v8/src/x64/code-stubs-x64.h
@@ -79,6 +79,13 @@ class StoreBufferOverflowStub: public CodeStub {
};
+// Flag that indicates how to generate code for the stub GenericBinaryOpStub.
+enum GenericBinaryFlags {
+ NO_GENERIC_BINARY_FLAGS = 0,
+ NO_SMI_CODE_IN_STUB = 1 << 0 // Omit smi code in stub.
+};
+
+
class UnaryOpStub: public CodeStub {
public:
UnaryOpStub(Token::Value op,
@@ -150,6 +157,95 @@ class UnaryOpStub: public CodeStub {
};
+class BinaryOpStub: public CodeStub {
+ public:
+ BinaryOpStub(Token::Value op, OverwriteMode mode)
+ : op_(op),
+ mode_(mode),
+ operands_type_(BinaryOpIC::UNINITIALIZED),
+ result_type_(BinaryOpIC::UNINITIALIZED) {
+ ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
+ }
+
+ BinaryOpStub(
+ int key,
+ BinaryOpIC::TypeInfo operands_type,
+ BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+ : op_(OpBits::decode(key)),
+ mode_(ModeBits::decode(key)),
+ operands_type_(operands_type),
+ result_type_(result_type) { }
+
+ private:
+ enum SmiCodeGenerateHeapNumberResults {
+ ALLOW_HEAPNUMBER_RESULTS,
+ NO_HEAPNUMBER_RESULTS
+ };
+
+ Token::Value op_;
+ OverwriteMode mode_;
+
+ // Operand type information determined at runtime.
+ BinaryOpIC::TypeInfo operands_type_;
+ BinaryOpIC::TypeInfo result_type_;
+
+ virtual void PrintName(StringStream* stream);
+
+ // Minor key encoding in 15 bits RRRTTTOOOOOOOMM.
+ class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+ class OpBits: public BitField<Token::Value, 2, 7> {};
+ class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 9, 3> {};
+ class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 12, 3> {};
+
+ Major MajorKey() { return BinaryOp; }
+ int MinorKey() {
+ return OpBits::encode(op_)
+ | ModeBits::encode(mode_)
+ | OperandTypeInfoBits::encode(operands_type_)
+ | ResultTypeInfoBits::encode(result_type_);
+ }
+
+ void Generate(MacroAssembler* masm);
+ void GenerateGeneric(MacroAssembler* masm);
+ void GenerateSmiCode(MacroAssembler* masm,
+ Label* slow,
+ SmiCodeGenerateHeapNumberResults heapnumber_results);
+ void GenerateFloatingPointCode(MacroAssembler* masm,
+ Label* allocation_failure,
+ Label* non_numeric_failure);
+ void GenerateStringAddCode(MacroAssembler* masm);
+ void GenerateCallRuntimeCode(MacroAssembler* masm);
+ void GenerateLoadArguments(MacroAssembler* masm);
+ void GenerateReturn(MacroAssembler* masm);
+ void GenerateUninitializedStub(MacroAssembler* masm);
+ void GenerateSmiStub(MacroAssembler* masm);
+ void GenerateInt32Stub(MacroAssembler* masm);
+ void GenerateHeapNumberStub(MacroAssembler* masm);
+ void GenerateOddballStub(MacroAssembler* masm);
+ void GenerateStringStub(MacroAssembler* masm);
+ void GenerateBothStringStub(MacroAssembler* masm);
+ void GenerateGenericStub(MacroAssembler* masm);
+
+ void GenerateHeapResultAllocation(MacroAssembler* masm, Label* alloc_failure);
+ void GenerateRegisterArgsPush(MacroAssembler* masm);
+ void GenerateTypeTransition(MacroAssembler* masm);
+ void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
+
+ virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+ virtual InlineCacheState GetICState() {
+ return BinaryOpIC::ToState(operands_type_);
+ }
+
+ virtual void FinishCode(Handle<Code> code) {
+ code->set_binary_op_type(operands_type_);
+ code->set_binary_op_result_type(result_type_);
+ }
+
+ friend class CodeGenerator;
+};
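MinorKey packs the stub parameters into the 15-bit RRRTTTOOOOOOOMM layout through the BitField helpers. A simplified stand-alone version of the same pattern (v8's real BitField adds is_valid and more checking):

    #include <cstdint>

    // Simplified sketch of v8's BitField<T, shift, size> helper.
    template <typename T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) { return uint32_t(value) << shift; }
      static T decode(uint32_t key) { return T((key & kMask) >> shift); }
    };

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // The 15-bit layout RRRTTTOOOOOOOMM from the class above.
    using ModeBits            = BitField<OverwriteMode, 0, 2>;  // MM
    using OpBits              = BitField<int, 2, 7>;            // OOOOOOO
    using OperandTypeInfoBits = BitField<int, 9, 3>;            // TTT
    using ResultTypeInfoBits  = BitField<int, 12, 3>;           // RRR

    uint32_t MinorKey(int op, OverwriteMode mode, int operands, int result) {
      return OpBits::encode(op) | ModeBits::encode(mode) |
             OperandTypeInfoBits::encode(operands) |
             ResultTypeInfoBits::encode(result);
    }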
+
+
class StringHelper : public AllStatic {
public:
// Generate code for copying characters using a simple loop. This should only
diff --git a/deps/v8/src/x64/codegen-x64.cc b/deps/v8/src/x64/codegen-x64.cc
index 7954604e9..2924810c1 100644
--- a/deps/v8/src/x64/codegen-x64.cc
+++ b/deps/v8/src/x64/codegen-x64.cc
@@ -99,36 +99,6 @@ UnaryMathFunction CreateTranscendentalFunction(TranscendentalCache::Type type) {
}
-UnaryMathFunction CreateExpFunction() {
- if (!FLAG_fast_math) return &exp;
- size_t actual_size;
- byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
- if (buffer == NULL) return &exp;
- ExternalReference::InitializeMathExpData();
-
- MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
- // xmm0: raw double input.
- XMMRegister input = xmm0;
- XMMRegister result = xmm1;
- __ push(rax);
- __ push(rbx);
-
- MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx);
-
- __ pop(rbx);
- __ pop(rax);
- __ movsd(xmm0, result);
- __ Ret();
-
- CodeDesc desc;
- masm.GetCode(&desc);
-
- CPU::FlushICache(buffer, actual_size);
- OS::ProtectCode(buffer, actual_size);
- return FUNCTION_CAST<UnaryMathFunction>(buffer);
-}
-
-
UnaryMathFunction CreateSqrtFunction() {
size_t actual_size;
// Allocate buffer in executable space.
@@ -581,7 +551,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
// Dispatch on the encoding: ASCII or two-byte.
Label ascii;
__ bind(&seq_string);
- STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+ STATIC_ASSERT((kStringEncodingMask & kAsciiStringTag) != 0);
STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
__ testb(result, Immediate(kStringEncodingMask));
__ j(not_zero, &ascii, Label::kNear);
@@ -601,167 +571,12 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm,
__ movzxbl(result, FieldOperand(string,
index,
times_1,
- SeqOneByteString::kHeaderSize));
- __ bind(&done);
-}
-
-
-void SeqStringSetCharGenerator::Generate(MacroAssembler* masm,
- String::Encoding encoding,
- Register string,
- Register index,
- Register value) {
- if (FLAG_debug_code) {
- __ Check(masm->CheckSmi(index), "Non-smi index");
- __ Check(masm->CheckSmi(value), "Non-smi value");
-
- __ SmiCompare(index, FieldOperand(string, String::kLengthOffset));
- __ Check(less, "Index is too large");
-
- __ SmiCompare(index, Smi::FromInt(0));
- __ Check(greater_equal, "Index is negative");
-
- __ push(value);
- __ movq(value, FieldOperand(string, HeapObject::kMapOffset));
- __ movzxbq(value, FieldOperand(value, Map::kInstanceTypeOffset));
-
- __ andb(value, Immediate(kStringRepresentationMask | kStringEncodingMask));
- static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
- static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
- __ cmpq(value, Immediate(encoding == String::ONE_BYTE_ENCODING
- ? one_byte_seq_type : two_byte_seq_type));
- __ Check(equal, "Unexpected string type");
- __ pop(value);
- }
-
- __ SmiToInteger32(value, value);
- __ SmiToInteger32(index, index);
- if (encoding == String::ONE_BYTE_ENCODING) {
- __ movb(FieldOperand(string, index, times_1, SeqString::kHeaderSize),
- value);
- } else {
- __ movw(FieldOperand(string, index, times_2, SeqString::kHeaderSize),
- value);
- }
-}
-
-
-void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2) {
- ASSERT(!input.is(result));
- ASSERT(!input.is(double_scratch));
- ASSERT(!result.is(double_scratch));
- ASSERT(!temp1.is(temp2));
- ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
-
- Label done;
-
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize));
- __ xorpd(result, result);
- __ ucomisd(double_scratch, input);
- __ j(above_equal, &done);
- __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize));
- __ j(above_equal, &done);
- __ movsd(double_scratch, Operand(kScratchRegister, 3 * kDoubleSize));
- __ movsd(result, Operand(kScratchRegister, 4 * kDoubleSize));
- __ mulsd(double_scratch, input);
- __ addsd(double_scratch, result);
- __ movq(temp2, double_scratch);
- __ subsd(double_scratch, result);
- __ movsd(result, Operand(kScratchRegister, 6 * kDoubleSize));
- __ lea(temp1, Operand(temp2, 0x1ff800));
- __ and_(temp2, Immediate(0x7ff));
- __ shr(temp1, Immediate(11));
- __ mulsd(double_scratch, Operand(kScratchRegister, 5 * kDoubleSize));
- __ movq(kScratchRegister, ExternalReference::math_exp_log_table());
- __ shl(temp1, Immediate(52));
- __ or_(temp1, Operand(kScratchRegister, temp2, times_8, 0));
- __ movq(kScratchRegister, ExternalReference::math_exp_constants(0));
- __ subsd(double_scratch, input);
- __ movsd(input, double_scratch);
- __ subsd(result, double_scratch);
- __ mulsd(input, double_scratch);
- __ mulsd(result, input);
- __ movq(input, temp1);
- __ mulsd(result, Operand(kScratchRegister, 7 * kDoubleSize));
- __ subsd(result, double_scratch);
- __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize));
- __ mulsd(result, input);
-
+ SeqAsciiString::kHeaderSize));
__ bind(&done);
}
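The EmitMathExp generator removed above (together with its CreateExpFunction wrapper) computed e^x with range reduction, a lookup in math_exp_log_table for the power-of-two part, and a short polynomial for the remainder. A generic sketch of that family of algorithms, with none of V8's constants or table:

    #include <cmath>

    // exp(x) = 2^k * exp(r) with k = round(x / ln 2) and r = x - k*ln 2,
    // so |r| <= ln(2)/2 and a short Taylor polynomial suffices for exp(r).
    double FastExp(double x) {
      static const double kLn2 = 0.6931471805599453;
      int k = (int)std::nearbyint(x / kLn2);     // integer part
      double r = x - k * kLn2;                   // reduced argument
      // 5-term Taylor series; V8 instead combines a table with a polynomial.
      double p = 1.0 + r * (1.0 + r * (0.5 + r * (1.0 / 6 + r * (1.0 / 24))));
      return std::ldexp(p, k);                   // scale by 2^k
    }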
#undef __
-
-static const int kNoCodeAgeSequenceLength = 6;
-
-static byte* GetNoCodeAgeSequence(uint32_t* length) {
- static bool initialized = false;
- static byte sequence[kNoCodeAgeSequenceLength];
- *length = kNoCodeAgeSequenceLength;
- if (!initialized) {
- // The sequence of instructions that is patched out for aging code is the
- // following boilerplate stack-building prologue that is found both in
- // FUNCTION and OPTIMIZED_FUNCTION code:
- CodePatcher patcher(sequence, kNoCodeAgeSequenceLength);
- patcher.masm()->push(rbp);
- patcher.masm()->movq(rbp, rsp);
- patcher.masm()->push(rsi);
- patcher.masm()->push(rdi);
- initialized = true;
- }
- return sequence;
-}
-
-
-bool Code::IsYoungSequence(byte* sequence) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- bool result = (!memcmp(sequence, young_sequence, young_length));
- ASSERT(result || *sequence == kCallOpcode);
- return result;
-}
-
-
-void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
- MarkingParity* parity) {
- if (IsYoungSequence(sequence)) {
- *age = kNoAge;
- *parity = NO_MARKING_PARITY;
- } else {
- sequence++; // Skip the kCallOpcode byte
- Address target_address = sequence + *reinterpret_cast<int*>(sequence) +
- Assembler::kCallTargetAddressOffset;
- Code* stub = GetCodeFromTargetAddress(target_address);
- GetCodeAgeAndParity(stub, age, parity);
- }
-}
-
-
-void Code::PatchPlatformCodeAge(byte* sequence,
- Code::Age age,
- MarkingParity parity) {
- uint32_t young_length;
- byte* young_sequence = GetNoCodeAgeSequence(&young_length);
- if (age == kNoAge) {
- memcpy(sequence, young_sequence, young_length);
- CPU::FlushICache(sequence, young_length);
- } else {
- Code* stub = GetCodeAgeStub(age, parity);
- CodePatcher patcher(sequence, young_length);
- patcher.masm()->call(stub->instruction_start());
- patcher.masm()->nop();
- }
-}
-
-
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_X64
diff --git a/deps/v8/src/x64/codegen-x64.h b/deps/v8/src/x64/codegen-x64.h
index d44409521..2e8075103 100644
--- a/deps/v8/src/x64/codegen-x64.h
+++ b/deps/v8/src/x64/codegen-x64.h
@@ -39,6 +39,7 @@ class CompilationInfo;
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
// -------------------------------------------------------------------------
// CodeGenerator
@@ -83,20 +84,6 @@ class StringCharLoadGenerator : public AllStatic {
DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
};
-
-class MathExpGenerator : public AllStatic {
- public:
- static void EmitMathExp(MacroAssembler* masm,
- XMMRegister input,
- XMMRegister result,
- XMMRegister double_scratch,
- Register temp1,
- Register temp2);
-
- private:
- DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
-};
-
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_H_
diff --git a/deps/v8/src/x64/deoptimizer-x64.cc b/deps/v8/src/x64/deoptimizer-x64.cc
index c8fdfce26..a3fe8f9cf 100644
--- a/deps/v8/src/x64/deoptimizer-x64.cc
+++ b/deps/v8/src/x64/deoptimizer-x64.cc
@@ -116,6 +116,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x1f;
+static const byte kJaeInstruction = 0x73;
+static const byte kJaeOffset = 0x07;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
@@ -127,26 +129,31 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
- // The back edge bookkeeping code matches the pattern:
+ // The stack check code matches the pattern:
//
- // add <profiling_counter>, <-delta>
- // jns ok
+ // cmp rsp, <limit>
+ // jae ok
// call <stack guard>
// test rax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
- // add <profiling_counter>, <-delta> ;; Not changed
+ // cmp rsp, <limit> ;; Not changed
// nop
// nop
  //     call <on-stack replacement>
// test rax, <loop nesting depth>
// ok:
//
- ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
- ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
- ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+ if (FLAG_count_based_interrupts) {
+ ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
+ } else {
+ ASSERT_EQ(kJaeInstruction, *(call_target_address - 3));
+ ASSERT_EQ(kJaeOffset, *(call_target_address - 2));
+ }
+ ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
Assembler::set_target_address_at(call_target_address,
@@ -169,8 +176,13 @@ void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
- *(call_target_address - 3) = kJnsInstruction;
- *(call_target_address - 2) = kJnsOffset;
+ if (FLAG_count_based_interrupts) {
+ *(call_target_address - 3) = kJnsInstruction;
+ *(call_target_address - 2) = kJnsOffset;
+ } else {
+ *(call_target_address - 3) = kJaeInstruction;
+ *(call_target_address - 2) = kJaeOffset;
+ }
Assembler::set_target_address_at(call_target_address,
check_code->entry());
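Both patch routines rewrite the two bytes immediately before the call: the conditional branch (jns under count-based interrupts, jae under the classic stack-limit check) is replaced by the two-byte nop 66 90 and later restored. The byte-level mechanics as a plain-buffer sketch (real code must also flush the instruction cache and handle write protection):

    #include <cassert>
    #include <cstdint>

    const uint8_t kJaeInstruction = 0x73, kJaeOffset = 0x07;
    const uint8_t kNopByteOne = 0x66, kNopByteTwo = 0x90;
    const uint8_t kCallInstruction = 0xe8;

    // Disable the "jae ok" guard so the call below it always executes;
    // call_target_address points at the call's 4-byte target operand.
    void PatchGuard(uint8_t* call_target_address) {
      assert(call_target_address[-3] == kJaeInstruction);
      assert(call_target_address[-2] == kJaeOffset);
      assert(call_target_address[-1] == kCallInstruction);
      call_target_address[-3] = kNopByteOne;  // 66 90 is a two-byte nop,
      call_target_address[-2] = kNopByteTwo;  // same length as jae <offset>
    }

    void RevertGuard(uint8_t* call_target_address) {
      call_target_address[-3] = kJaeInstruction;
      call_target_address[-2] = kJaeOffset;
    }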
diff --git a/deps/v8/src/x64/disasm-x64.cc b/deps/v8/src/x64/disasm-x64.cc
index fb0914d7d..c8606c40b 100644
--- a/deps/v8/src/x64/disasm-x64.cc
+++ b/deps/v8/src/x64/disasm-x64.cc
@@ -1244,13 +1244,6 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
AppendToBuffer("xorps %s, ", NameOfXMMRegister(regop));
current += PrintRightXMMOperand(current);
- } else if (opcode == 0x50) {
- // movmskps reg, xmm
- int mod, regop, rm;
- get_modrm(*current, &mod, &regop, &rm);
- AppendToBuffer("movmskps %s, ", NameOfCPURegister(regop));
- current += PrintRightXMMOperand(current);
-
} else if ((opcode & 0xF0) == 0x80) {
// Jcc: Conditional jump (branch).
current = data + JumpConditional(data);
@@ -1731,11 +1724,6 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
data += F6F7Instruction(data);
break;
- case 0x3C:
- AppendToBuffer("cmp al, 0x%x", *reinterpret_cast<int8_t*>(data + 1));
- data +=2;
- break;
-
default:
UnimplementedInstruction();
data += 1;
diff --git a/deps/v8/src/x64/full-codegen-x64.cc b/deps/v8/src/x64/full-codegen-x64.cc
index 68773e96e..475fb9de3 100644
--- a/deps/v8/src/x64/full-codegen-x64.cc
+++ b/deps/v8/src/x64/full-codegen-x64.cc
@@ -119,7 +119,7 @@ void FullCodeGenerator::Generate() {
handler_table_ =
isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
profiling_counter_ = isolate()->factory()->NewJSGlobalPropertyCell(
- Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+ Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget)));
SetFunctionPosition(function());
Comment cmnt(masm_, "[ function compiled by full code generator");
@@ -152,7 +152,6 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
- info->set_prologue_offset(masm_->pc_offset());
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -325,27 +324,34 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
}
-void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
- Label* back_edge_target) {
- Comment cmnt(masm_, "[ Back edge bookkeeping");
+void FullCodeGenerator::EmitStackCheck(IterationStatement* stmt,
+ Label* back_edge_target) {
+ Comment cmnt(masm_, "[ Stack check");
Label ok;
- int weight = 1;
- if (FLAG_weighted_back_edges) {
- ASSERT(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kBackEdgeDistanceUnit));
+ if (FLAG_count_based_interrupts) {
+ int weight = 1;
+ if (FLAG_weighted_back_edges) {
+ ASSERT(back_edge_target->is_bound());
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+ weight = Min(kMaxBackEdgeWeight,
+ Max(1, distance / kBackEdgeDistanceUnit));
+ }
+ EmitProfilingCounterDecrement(weight);
+ __ j(positive, &ok, Label::kNear);
+ InterruptStub stub;
+ __ CallStub(&stub);
+ } else {
+ __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
+ __ j(above_equal, &ok, Label::kNear);
+ StackCheckStub stub;
+ __ CallStub(&stub);
}
- EmitProfilingCounterDecrement(weight);
- __ j(positive, &ok, Label::kNear);
- InterruptStub stub;
- __ CallStub(&stub);
// Record a mapping of this PC offset to the OSR id. This is used to find
// the AST id from the unoptimized code in order to use it as a key into
// the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
+ RecordStackCheck(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
@@ -354,7 +360,9 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
ASSERT(loop_depth() > 0);
__ testl(rax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
- EmitProfilingCounterReset();
+ if (FLAG_count_based_interrupts) {
+ EmitProfilingCounterReset();
+ }
__ bind(&ok);
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
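The restored EmitStackCheck supports both interrupt styles: under --count-based-interrupts a per-function profiling counter is decremented and InterruptStub runs when it goes negative; otherwise rsp is compared against the heap's stack limit and StackCheckStub runs on underflow. The control flow in scalar form (a sketch with illustrative names):

    #include <cstdint>

    struct LoopState {
      int profiling_counter;
      uintptr_t stack_pointer, stack_limit;
    };

    // Returns true when the back edge should call into the runtime
    // (InterruptStub / StackCheckStub in the code above).
    bool BackEdgeNeedsRuntimeCall(LoopState* s, bool count_based, int weight) {
      if (count_based) {
        s->profiling_counter -= weight;          // EmitProfilingCounterDecrement
        return s->profiling_counter < 0;         // j(positive, &ok) otherwise
      }
      return s->stack_pointer < s->stack_limit;  // CompareRoot(rsp, stack limit)
    }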
@@ -751,7 +759,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
- // The variable in the declaration always resides in the current context.
+ // The variable in the declaration always resides in the current function
+ // context.
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
@@ -882,32 +891,33 @@ void FullCodeGenerator::VisitFunctionDeclaration(
void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
- Variable* variable = declaration->proxy()->var();
- ASSERT(variable->location() == Variable::CONTEXT);
- ASSERT(variable->interface()->IsFrozen());
-
- Comment cmnt(masm_, "[ ModuleDeclaration");
- EmitDebugCheckDeclarationContext(variable);
+ VariableProxy* proxy = declaration->proxy();
+ Variable* variable = proxy->var();
+ Handle<JSModule> instance = declaration->module()->interface()->Instance();
+ ASSERT(!instance.is_null());
- // Load instance object.
- __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
- __ movq(rax, ContextOperand(rax, variable->interface()->Index()));
- __ movq(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
+ switch (variable->location()) {
+ case Variable::UNALLOCATED: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ globals_->Add(variable->name(), zone());
+ globals_->Add(instance, zone());
+ Visit(declaration->module());
+ break;
+ }
- // Assign it.
- __ movq(ContextOperand(rsi, variable->index()), rax);
- // We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(rsi,
- Context::SlotOffset(variable->index()),
- rax,
- rcx,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
- PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+ case Variable::CONTEXT: {
+ Comment cmnt(masm_, "[ ModuleDeclaration");
+ EmitDebugCheckDeclarationContext(variable);
+ __ Move(ContextOperand(rsi, variable->index()), instance);
+ Visit(declaration->module());
+ break;
+ }
- // Traverse into body.
- Visit(declaration->module());
+ case Variable::PARAMETER:
+ case Variable::LOCAL:
+ case Variable::LOOKUP:
+ UNREACHABLE();
+ }
}
@@ -949,14 +959,6 @@ void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
}
-void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
- // Call the runtime to declare the modules.
- __ Push(descriptions);
- __ CallRuntime(Runtime::kDeclareModules, 1);
- // Return value is ignored.
-}
-
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -1212,7 +1214,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
__ bind(loop_statement.continue_label());
__ SmiAddConstant(Operand(rsp, 0 * kPointerSize), Smi::FromInt(1));
- EmitBackEdgeBookkeeping(stmt, &loop);
+ EmitStackCheck(stmt, &loop);
__ jmp(&loop);
// Remove the pointers stored on the stack.
@@ -1366,9 +1368,9 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
__ movq(rax, ContextSlotOperandCheckExtensions(local, slow));
- if (local->mode() == LET ||
- local->mode() == CONST ||
- local->mode() == CONST_HARMONY) {
+ if (local->mode() == CONST ||
+ local->mode() == CONST_HARMONY ||
+ local->mode() == LET) {
__ CompareRoot(rax, Heap::kTheHoleValueRootIndex);
__ j(not_equal, done);
if (local->mode() == CONST) {
@@ -2298,7 +2300,7 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Property* property = callee->AsProperty();
- if (proxy != NULL && proxy->var()->is_possibly_eval(isolate())) {
+ if (proxy != NULL && proxy->var()->is_possibly_eval()) {
// In a call to eval, we first call %ResolvePossiblyDirectEval to
// resolve the function we need to call and the receiver of the call.
// Then we call the resolved function using the given arguments.
@@ -3038,38 +3040,6 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
}
-void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::ONE_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
-void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
- ZoneList<Expression*>* args = expr->arguments();
- ASSERT_EQ(3, args->length());
-
- VisitForStackValue(args->at(1)); // index
- VisitForStackValue(args->at(2)); // value
- __ pop(rcx);
- __ pop(rbx);
- VisitForAccumulatorValue(args->at(0)); // string
-
- static const String::Encoding encoding = String::TWO_BYTE_ENCODING;
- SeqStringSetCharGenerator::Generate(masm_, encoding, rax, rbx, rcx);
- context()->Plug(rax);
-}
-
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3581,10 +3551,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
__ AddSmiField(string_length,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
__ j(overflow, &bailout);
__ incl(index);
__ cmpl(index, array_length);
@@ -3620,7 +3590,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ movzxbl(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ andb(scratch, Immediate(
kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask));
- __ cmpb(scratch, Immediate(kStringTag | kOneByteStringTag | kSeqStringTag));
+ __ cmpb(scratch, Immediate(kStringTag | kAsciiStringTag | kSeqStringTag));
__ j(not_equal, &bailout);
// Live registers:
@@ -3631,7 +3601,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
// Add (separator length times (array_length - 1)) to string_length.
__ SmiToInteger32(scratch,
- FieldOperand(string, SeqOneByteString::kLengthOffset));
+ FieldOperand(string, SeqAsciiString::kLengthOffset));
__ decl(index);
__ imull(scratch, index);
__ j(overflow, &bailout);
@@ -3644,10 +3614,10 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ AllocateAsciiString(result_pos, string_length, scratch,
index, string, &bailout);
__ movq(result_operand, result_pos);
- __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
+ __ lea(result_pos, FieldOperand(result_pos, SeqAsciiString::kHeaderSize));
__ movq(string, separator_operand);
- __ SmiCompare(FieldOperand(string, SeqOneByteString::kLengthOffset),
+ __ SmiCompare(FieldOperand(string, SeqAsciiString::kLengthOffset),
Smi::FromInt(1));
__ j(equal, &one_char_separator);
__ j(greater, &long_separator);
@@ -3673,7 +3643,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ bind(&loop_1_condition);
@@ -3691,7 +3661,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ bind(&one_char_separator);
// Get the separator ASCII character value.
// Register "string" holds the separator.
- __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
+ __ movzxbl(scratch, FieldOperand(string, SeqAsciiString::kHeaderSize));
__ Set(index, 0);
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
@@ -3717,7 +3687,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incl(index);
__ cmpl(index, array_length_operand);
@@ -3742,7 +3712,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(scratch,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ movq(separator_operand, string);
// Jump into the loop after the code that copies the separator, so the first
@@ -3768,7 +3738,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
__ SmiToInteger32(string_length,
FieldOperand(string, String::kLengthOffset));
__ lea(string,
- FieldOperand(string, SeqOneByteString::kHeaderSize));
+ FieldOperand(string, SeqAsciiString::kHeaderSize));
__ CopyBytes(result_pos, string, string_length);
__ incq(index);
__ j(not_equal, &loop_3); // Loop while (index < 0).
@@ -4086,9 +4056,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
SetSourcePosition(expr->position());
// Call stub for +1/-1.
- __ movq(rdx, rax);
- __ Move(rax, Smi::FromInt(1));
BinaryOpStub stub(expr->binary_op(), NO_OVERWRITE);
+ if (expr->op() == Token::INC) {
+ __ Move(rdx, Smi::FromInt(1));
+ } else {
+ __ movq(rdx, rax);
+ __ Move(rax, Smi::FromInt(1));
+ }
CallIC(stub.GetCode(), RelocInfo::CODE_TARGET, expr->CountBinOpFeedbackId());
patch_site.EmitPatchInfo();
__ bind(&done);
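
The count-operation hunk above restores the 3.14 operand setup for the stub call: Token::INC may put the constant 1 in rdx and leave the old value in rax only because addition commutes, while Token::DEC must keep the original value as the left operand. The asymmetry in minimal C++ (apply() is an illustrative stand-in for the binary-op stub, not V8 code):

    #include <cassert>

    // Illustrative stand-in for the binary-op stub: computes left OP right.
    static int apply(char op, int left, int right) {
      return op == '+' ? left + right : left - right;
    }

    int main() {
      int x = 41;
      assert(apply('+', 1, x) == x + 1);  // INC: either operand order works
      assert(apply('-', x, 1) == x - 1);  // DEC: old value stays on the left
      assert(apply('-', 1, x) != x - 1);  // swapped operands compute 1 - x
      return 0;
    }
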
@@ -4307,7 +4281,29 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
- Condition cc = CompareIC::ComputeCondition(op);
+ Condition cc = no_condition;
+ switch (op) {
+ case Token::EQ_STRICT:
+ case Token::EQ:
+ cc = equal;
+ break;
+ case Token::LT:
+ cc = less;
+ break;
+ case Token::GT:
+ cc = greater;
+ break;
+ case Token::LTE:
+ cc = less_equal;
+ break;
+ case Token::GTE:
+ cc = greater_equal;
+ break;
+ case Token::IN:
+ case Token::INSTANCEOF:
+ default:
+ UNREACHABLE();
+ }
__ pop(rdx);
bool inline_smi_code = ShouldInlineSmiCase(op);
diff --git a/deps/v8/src/x64/ic-x64.cc b/deps/v8/src/x64/ic-x64.cc
index 641e24330..efa07a80b 100644
--- a/deps/v8/src/x64/ic-x64.cc
+++ b/deps/v8/src/x64/ic-x64.cc
@@ -1729,7 +1729,7 @@ Condition CompareIC::ComputeCondition(Token::Value op) {
}
-bool CompareIC::HasInlinedSmiCode(Address address) {
+static bool HasInlinedSmiCode(Address address) {
// The address of the instruction following the call.
Address test_instruction_address =
address + Assembler::kCallTargetAddressOffset;
@@ -1740,6 +1740,39 @@ bool CompareIC::HasInlinedSmiCode(Address address) {
}
+void CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+ HandleScope scope;
+ Handle<Code> rewritten;
+ State previous_state = GetState();
+
+ State state = TargetState(previous_state, HasInlinedSmiCode(address()), x, y);
+ if (state == GENERIC) {
+ CompareStub stub(GetCondition(), strict(), NO_COMPARE_FLAGS);
+ rewritten = stub.GetCode();
+ } else {
+ ICCompareStub stub(op_, state);
+ if (state == KNOWN_OBJECTS) {
+ stub.set_known_map(Handle<Map>(Handle<JSObject>::cast(x)->map()));
+ }
+ rewritten = stub.GetCode();
+ }
+ set_target(*rewritten);
+
+#ifdef DEBUG
+ if (FLAG_trace_ic) {
+ PrintF("[CompareIC (%s->%s)#%s]\n",
+ GetStateName(previous_state),
+ GetStateName(state),
+ Token::Name(op_));
+ }
+#endif
+
+ // Activate inlined smi code.
+ if (previous_state == UNINITIALIZED) {
+ PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+ }
+}
+
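
UpdateCaches, reinstated above, is the patching half of the compare inline cache: it derives a new state from the previous one plus the operand types just seen, installs the matching stub with set_target(), and enables the patched inline smi check only on the first transition out of UNINITIALIZED. A hedged sketch of such a monotonic state machine in standalone C++ (the states and transition rule are simplified assumptions, not the real TargetState() table):

    #include <cstdio>

    enum State { UNINITIALIZED, SMIS, HEAP_NUMBERS, GENERIC };

    struct Operand { bool is_smi; bool is_heap_number; };

    // Simplified, assumed rule: specialize while operand kinds stay
    // consistent with the site's history, otherwise fall back to GENERIC.
    static State Transition(State prev, Operand x, Operand y) {
      if (x.is_smi && y.is_smi)
        return (prev == UNINITIALIZED || prev == SMIS) ? SMIS : GENERIC;
      if (x.is_heap_number && y.is_heap_number)
        return (prev == UNINITIALIZED || prev == HEAP_NUMBERS) ? HEAP_NUMBERS
                                                               : GENERIC;
      return GENERIC;
    }

    int main() {
      State s = UNINITIALIZED;
      s = Transition(s, {true, false}, {true, false});  // smi vs smi -> SMIS
      std::printf("after smi compare: %d\n", s);
      s = Transition(s, {false, true}, {false, true});  // history mismatch
      std::printf("after heap-number compare: %d\n", s);  // -> GENERIC
      return 0;
    }
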
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
// The address of the instruction following the call.
Address test_instruction_address =
diff --git a/deps/v8/src/x64/lithium-codegen-x64.cc b/deps/v8/src/x64/lithium-codegen-x64.cc
index bb91813dd..b461e6290 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.cc
+++ b/deps/v8/src/x64/lithium-codegen-x64.cc
@@ -143,7 +143,6 @@ bool LCodeGen::GeneratePrologue() {
__ bind(&ok);
}
- info()->set_prologue_offset(masm_->pc_offset());
__ push(rbp); // Caller's frame pointer.
__ movq(rbp, rsp);
__ push(rsi); // Callee's context.
@@ -233,30 +232,7 @@ bool LCodeGen::GenerateBody() {
}
if (emit_instructions) {
- if (FLAG_code_comments) {
- HValue* hydrogen = instr->hydrogen_value();
- if (hydrogen != NULL) {
- if (hydrogen->IsChange()) {
- HValue* changed_value = HChange::cast(hydrogen)->value();
- int use_id = 0;
- const char* use_mnemo = "dead";
- if (hydrogen->UseCount() >= 1) {
- HValue* use_value = hydrogen->uses().value();
- use_id = use_value->id();
- use_mnemo = use_value->Mnemonic();
- }
- Comment(";;; @%d: %s. <of #%d %s for #%d %s>",
- current_instruction_, instr->Mnemonic(),
- changed_value->id(), changed_value->Mnemonic(),
- use_id, use_mnemo);
- } else {
- Comment(";;; @%d: %s. <#%d>", current_instruction_,
- instr->Mnemonic(), hydrogen->id());
- }
- } else {
- Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
- }
- }
+ Comment(";;; @%d: %s.", current_instruction_, instr->Mnemonic());
instr->CompileToNative(this);
}
}
@@ -1037,43 +1013,6 @@ void LCodeGen::DoMathFloorOfDiv(LMathFloorOfDiv* instr) {
void LCodeGen::DoDivI(LDivI* instr) {
- if (instr->hydrogen()->HasPowerOf2Divisor()) {
- Register dividend = ToRegister(instr->left());
- int32_t divisor =
- HConstant::cast(instr->hydrogen()->right())->Integer32Value();
- int32_t test_value = 0;
- int32_t power = 0;
-
- if (divisor > 0) {
- test_value = divisor - 1;
- power = WhichPowerOf2(divisor);
- } else {
- // Check for (0 / -x) that will produce negative zero.
- if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- __ testl(dividend, dividend);
- DeoptimizeIf(zero, instr->environment());
- }
- // Check for (kMinInt / -1).
- if (divisor == -1 && instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
- __ cmpl(dividend, Immediate(kMinInt));
- DeoptimizeIf(zero, instr->environment());
- }
- test_value = -divisor - 1;
- power = WhichPowerOf2(-divisor);
- }
-
- if (test_value != 0) {
- // Deoptimize if remainder is not 0.
- __ testl(dividend, Immediate(test_value));
- DeoptimizeIf(not_zero, instr->environment());
- __ sarl(dividend, Immediate(power));
- }
-
- if (divisor < 0) __ negl(dividend);
-
- return;
- }
-
LOperand* right = instr->right();
ASSERT(ToRegister(instr->result()).is(rax));
ASSERT(ToRegister(instr->left()).is(rax));
@@ -1099,7 +1038,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
__ bind(&left_not_zero);
}
- // Check for (kMinInt / -1).
+ // Check for (-kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
Label left_not_min_int;
__ cmpl(left_reg, Immediate(kMinInt));
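
The comparison this hunk ends on guards the one quotient 32-bit signed division cannot represent: kMinInt / -1 would be 2^31, one past INT32_MAX, and x86-64's idiv raises a divide error rather than wrapping, so the optimized code deoptimizes first. The arithmetic, reproduced safely in 64 bits:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int32_t kMinInt = INT32_MIN;
      // Widen before dividing: doing this directly in int32_t is undefined
      // behavior in C++ and a #DE fault for idiv, hence the deopt.
      int64_t quotient = static_cast<int64_t>(kMinInt) / -1;
      std::printf("kMinInt / -1 = %lld, INT32_MAX = %d\n",
                  static_cast<long long>(quotient), INT32_MAX);
      return 0;
    }
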
@@ -1271,9 +1210,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
ASSERT(ToRegister(right).is(rcx));
switch (instr->op()) {
- case Token::ROR:
- __ rorl_cl(ToRegister(left));
- break;
case Token::SAR:
__ sarl_cl(ToRegister(left));
break;
@@ -1295,11 +1231,6 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
int value = ToInteger32(LConstantOperand::cast(right));
uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
switch (instr->op()) {
- case Token::ROR:
- if (shift_count != 0) {
- __ rorl(ToRegister(left), Immediate(shift_count));
- }
- break;
case Token::SAR:
if (shift_count != 0) {
__ sarl(ToRegister(left), Immediate(shift_count));
@@ -1474,15 +1405,6 @@ void LCodeGen::DoDateField(LDateField* instr) {
}
-void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
- SeqStringSetCharGenerator::Generate(masm(),
- instr->encoding(),
- ToRegister(instr->string()),
- ToRegister(instr->index()),
- ToRegister(instr->value()));
-}
-
-
void LCodeGen::DoBitNotI(LBitNotI* instr) {
LOperand* input = instr->value();
ASSERT(input->Equals(instr->result()));
@@ -1535,17 +1457,17 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
if (right->IsConstantOperand()) {
Immediate right_imm =
Immediate(ToInteger32(LConstantOperand::cast(right)));
- __ cmpl(left_reg, right_imm);
+ __ cmpq(left_reg, right_imm);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_imm);
} else if (right->IsRegister()) {
Register right_reg = ToRegister(right);
- __ cmpl(left_reg, right_reg);
+ __ cmpq(left_reg, right_reg);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_reg);
} else {
Operand right_op = ToOperand(right);
- __ cmpl(left_reg, right_op);
+ __ cmpq(left_reg, right_op);
__ j(condition, &return_left, Label::kNear);
__ movq(left_reg, right_op);
}
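
The cmpl-to-cmpq change above is behavioral, not stylistic: DoMathMinMax is comparing tagged values here, and on x64 a smi keeps its payload in the upper 32 bits of the word, so a 32-bit compare only ever sees the zeroed low halves. A plain-C++ illustration (the value << 32 encoding is stated as an assumption):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Assumed x64 smi encoding for illustration: payload in the upper word.
      int64_t smi_1 = int64_t{1} << 32;
      int64_t smi_2 = int64_t{2} << 32;
      bool low32 = static_cast<int32_t>(smi_1) < static_cast<int32_t>(smi_2);
      bool full64 = smi_1 < smi_2;
      std::printf("cmpl-style compare says 1 < 2: %d (wrong)\n", low32);
      std::printf("cmpq-style compare says 1 < 2: %d (right)\n", full64);
      return 0;
    }
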
@@ -2690,16 +2612,15 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
}
-void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
+void LCodeGen::DoLoadKeyedFastElement(LLoadKeyedFastElement* instr) {
+ Register result = ToRegister(instr->result());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed (in this case) instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index argument
- // to the bounds check, which can be tagged, so that case must be
- // handled here, too.
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -2708,68 +2629,35 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ movsxlq(key_reg, key_reg);
}
}
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister result(ToDoubleRegister(instr->result()));
- __ movss(result, operand);
- __ cvtss2sd(result, result);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(ToDoubleRegister(instr->result()), operand);
- } else {
- Register result(ToRegister(instr->result()));
- switch (elements_kind) {
- case EXTERNAL_BYTE_ELEMENTS:
- __ movsxbq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- case EXTERNAL_PIXEL_ELEMENTS:
- __ movzxbq(result, operand);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- __ movsxwq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movzxwq(result, operand);
- break;
- case EXTERNAL_INT_ELEMENTS:
- __ movsxlq(result, operand);
- break;
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(result, operand);
- if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
- __ testl(result, result);
- DeoptimizeIf(negative, instr->environment());
- }
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
+ // Load the result.
+ __ movq(result,
+ BuildFastArrayOperand(instr->elements(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index()));
+
+ // Check for the hole value.
+ if (instr->hydrogen()->RequiresHoleCheck()) {
+ if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+ Condition smi = __ CheckSmi(result);
+ DeoptimizeIf(NegateCondition(smi), instr->environment());
+ } else {
+ __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+ DeoptimizeIf(equal, instr->environment());
}
}
}
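
The hole check restored above is what keeps the fast keyed load honest on holey arrays: the backing store marks never-assigned slots with a sentinel, and a load that observes it must deoptimize rather than hand the sentinel to user code. A sketch of that control flow with illustrative values (kTheHole below is a stand-in, not V8's real tagging):

    #include <cstdio>

    static const int kTheHole = -777;  // stand-in for the real hole sentinel

    // Mirrors the restored load: read the slot, then bail out on the hole
    // (DeoptimizeIf(equal) after CompareRoot in the generated code).
    static bool LoadElement(const int* elements, int index, int* out) {
      int value = elements[index];
      if (value == kTheHole) return false;
      *out = value;
      return true;
    }

    int main() {
      int backing[4] = {10, kTheHole, 30, 40};  // a[1] was never assigned
      for (int i = 0; i < 4; ++i) {
        int v;
        if (LoadElement(backing, i, &v))
          std::printf("a[%d] = %d\n", i, v);
        else
          std::printf("a[%d] is the hole -> deoptimize\n", i);
      }
      return 0;
    }
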
-void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+void LCodeGen::DoLoadKeyedFastDoubleElement(
+ LLoadKeyedFastDoubleElement* instr) {
XMMRegister result(ToDoubleRegister(instr->result()));
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyed instructions force the input
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
// representation for the key to be an integer, the input gets replaced
// during bound check elimination with the index argument to the bounds
// check, which can be tagged, so that case must be handled here, too.
@@ -2805,57 +2693,6 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
}
-void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
- Register result = ToRegister(instr->result());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that
- // case must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- // Load the result.
- __ movq(result,
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index()));
-
- // Check for the hole value.
- if (instr->hydrogen()->RequiresHoleCheck()) {
- if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- Condition smi = __ CheckSmi(result);
- DeoptimizeIf(NegateCondition(smi), instr->environment());
- } else {
- __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(equal, instr->environment());
- }
- }
-}
-
-
-void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
- if (instr->is_external()) {
- DoLoadKeyedExternalArray(instr);
- } else if (instr->hydrogen()->representation().IsDouble()) {
- DoLoadKeyedFixedDoubleArray(instr);
- } else {
- DoLoadKeyedFixedArray(instr);
- }
-}
-
-
Operand LCodeGen::BuildFastArrayOperand(
LOperand* elements_pointer,
LOperand* key,
@@ -2882,6 +2719,80 @@ Operand LCodeGen::BuildFastArrayOperand(
}
+void LCodeGen::DoLoadKeyedSpecializedArrayElement(
+ LLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister result(ToDoubleRegister(instr->result()));
+ __ movss(result, operand);
+ __ cvtss2sd(result, result);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(ToDoubleRegister(instr->result()), operand);
+ } else {
+ Register result(ToRegister(instr->result()));
+ switch (elements_kind) {
+ case EXTERNAL_BYTE_ELEMENTS:
+ __ movsxbq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ case EXTERNAL_PIXEL_ELEMENTS:
+ __ movzxbq(result, operand);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ __ movsxwq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movzxwq(result, operand);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ __ movsxlq(result, operand);
+ break;
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(result, operand);
+ if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+ __ testl(result, result);
+ DeoptimizeIf(negative, instr->environment());
+ }
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
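
The element-kind switch above exists because every external-array type wants a different load width and signedness: movsxbq sign-extends a byte, movzxbq zero-extends it, and so on up to the 32-bit loads. The same distinction in portable C++:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      unsigned char raw = 0xFF;  // one element of the external backing store
      int8_t as_signed;
      std::memcpy(&as_signed, &raw, 1);
      // EXTERNAL_BYTE_ELEMENTS: sign-extending load (movsxbq) yields -1.
      std::printf("signed byte:   %lld\n", static_cast<long long>(as_signed));
      // EXTERNAL_UNSIGNED_BYTE_ELEMENTS / PIXEL: zero-extending load
      // (movzxbq) yields 255.
      std::printf("unsigned byte: %llu\n",
                  static_cast<unsigned long long>(raw));
      return 0;
    }
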
void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rax));
@@ -3520,16 +3431,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
}
-void LCodeGen::DoMathExp(LMathExp* instr) {
- XMMRegister input = ToDoubleRegister(instr->value());
- XMMRegister result = ToDoubleRegister(instr->result());
- Register temp1 = ToRegister(instr->temp1());
- Register temp2 = ToRegister(instr->temp2());
-
- MathExpGenerator::EmitMathExp(masm(), input, result, xmm0, temp1, temp2);
-}
-
-
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(xmm1));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
@@ -3764,6 +3665,70 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
}
+void LCodeGen::DoStoreKeyedSpecializedArrayElement(
+ LStoreKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ LOperand* key = instr->key();
+ if (!key->IsConstantOperand()) {
+ Register key_reg = ToRegister(key);
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
+ if (instr->hydrogen()->key()->representation().IsTagged()) {
+ __ SmiToInteger64(key_reg, key_reg);
+ } else if (instr->hydrogen()->IsDehoisted()) {
+ // Sign extend key because it could be a 32 bit negative value
+ // and the dehoisted address computation happens in 64 bits
+ __ movsxlq(key_reg, key_reg);
+ }
+ }
+ Operand operand(BuildFastArrayOperand(
+ instr->external_pointer(),
+ key,
+ elements_kind,
+ 0,
+ instr->additional_index()));
+
+ if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
+ XMMRegister value(ToDoubleRegister(instr->value()));
+ __ cvtsd2ss(value, value);
+ __ movss(operand, value);
+ } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
+ __ movsd(operand, ToDoubleRegister(instr->value()));
+ } else {
+ Register value(ToRegister(instr->value()));
+ switch (elements_kind) {
+ case EXTERNAL_PIXEL_ELEMENTS:
+ case EXTERNAL_BYTE_ELEMENTS:
+ case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
+ __ movb(operand, value);
+ break;
+ case EXTERNAL_SHORT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
+ __ movw(operand, value);
+ break;
+ case EXTERNAL_INT_ELEMENTS:
+ case EXTERNAL_UNSIGNED_INT_ELEMENTS:
+ __ movl(operand, value);
+ break;
+ case EXTERNAL_FLOAT_ELEMENTS:
+ case EXTERNAL_DOUBLE_ELEMENTS:
+ case FAST_ELEMENTS:
+ case FAST_SMI_ELEMENTS:
+ case FAST_DOUBLE_ELEMENTS:
+ case FAST_HOLEY_ELEMENTS:
+ case FAST_HOLEY_SMI_ELEMENTS:
+ case FAST_HOLEY_DOUBLE_ELEMENTS:
+ case DICTIONARY_ELEMENTS:
+ case NON_STRICT_ARGUMENTS_ELEMENTS:
+ UNREACHABLE();
+ break;
+ }
+ }
+}
+
+
void LCodeGen::DeoptIfTaggedButNotSmi(LEnvironment* environment,
HValue* value,
LOperand* operand) {
@@ -3824,16 +3789,16 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
}
-void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
+void LCodeGen::DoStoreKeyedFastElement(LStoreKeyedFastElement* instr) {
+ Register value = ToRegister(instr->value());
+ Register elements = ToRegister(instr->object());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the input
- // gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -3842,62 +3807,45 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
__ movsxlq(key_reg, key_reg);
}
}
- Operand operand(BuildFastArrayOperand(
- instr->elements(),
- key,
- elements_kind,
- 0,
- instr->additional_index()));
- if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
- XMMRegister value(ToDoubleRegister(instr->value()));
- __ cvtsd2ss(value, value);
- __ movss(operand, value);
- } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
- __ movsd(operand, ToDoubleRegister(instr->value()));
+ Operand operand =
+ BuildFastArrayOperand(instr->object(),
+ key,
+ FAST_ELEMENTS,
+ FixedArray::kHeaderSize - kHeapObjectTag,
+ instr->additional_index());
+
+ if (instr->hydrogen()->NeedsWriteBarrier()) {
+ ASSERT(!instr->key()->IsConstantOperand());
+ HType type = instr->hydrogen()->value()->type();
+ SmiCheck check_needed =
+ type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+ // Compute address of modified element and store it into key register.
+ Register key_reg(ToRegister(key));
+ __ lea(key_reg, operand);
+ __ movq(Operand(key_reg, 0), value);
+ __ RecordWrite(elements,
+ key_reg,
+ value,
+ kSaveFPRegs,
+ EMIT_REMEMBERED_SET,
+ check_needed);
} else {
- Register value(ToRegister(instr->value()));
- switch (elements_kind) {
- case EXTERNAL_PIXEL_ELEMENTS:
- case EXTERNAL_BYTE_ELEMENTS:
- case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
- __ movb(operand, value);
- break;
- case EXTERNAL_SHORT_ELEMENTS:
- case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
- __ movw(operand, value);
- break;
- case EXTERNAL_INT_ELEMENTS:
- case EXTERNAL_UNSIGNED_INT_ELEMENTS:
- __ movl(operand, value);
- break;
- case EXTERNAL_FLOAT_ELEMENTS:
- case EXTERNAL_DOUBLE_ELEMENTS:
- case FAST_ELEMENTS:
- case FAST_SMI_ELEMENTS:
- case FAST_DOUBLE_ELEMENTS:
- case FAST_HOLEY_ELEMENTS:
- case FAST_HOLEY_SMI_ELEMENTS:
- case FAST_HOLEY_DOUBLE_ELEMENTS:
- case DICTIONARY_ELEMENTS:
- case NON_STRICT_ARGUMENTS_ELEMENTS:
- UNREACHABLE();
- break;
- }
+ __ movq(operand, value);
}
}
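
The NeedsWriteBarrier() branch above is the mutator half of generational GC bookkeeping: when a heap pointer is stored into a fixed array, RecordWrite logs the slot so a minor collection can find old-to-new pointers without scanning the old generation, and smi stores may skip the barrier, which is what the OMIT_SMI_CHECK choice expresses. A hedged sketch of the idea with invented types:

    #include <cstdio>
    #include <unordered_set>

    struct Object { bool in_new_space; };

    // Invented stand-in for the remembered set RecordWrite feeds.
    static std::unordered_set<Object**> remembered_set;

    static void StoreWithBarrier(Object** slot, Object* value) {
      *slot = value;
      // Only pointers into new space need logging; smis (non-pointers)
      // never do, which is what OMIT_SMI_CHECK exploits in the real code.
      if (value != nullptr && value->in_new_space)
        remembered_set.insert(slot);  // EMIT_REMEMBERED_SET
    }

    int main() {
      Object old_obj{false}, young_obj{true};
      Object* field = nullptr;  // imagine a slot in an old-space array
      StoreWithBarrier(&field, &young_obj);
      std::printf("remembered slots: %zu\n", remembered_set.size());  // 1
      StoreWithBarrier(&field, &old_obj);  // old-to-old: nothing new to log
      std::printf("remembered slots: %zu\n", remembered_set.size());  // 1
      return 0;
    }
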
-void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+void LCodeGen::DoStoreKeyedFastDoubleElement(
+ LStoreKeyedFastDoubleElement* instr) {
XMMRegister value = ToDoubleRegister(instr->value());
LOperand* key = instr->key();
if (!key->IsConstantOperand()) {
Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
+ // Even though the HLoad/StoreKeyedFastElement instructions force the input
+ // representation for the key to be an integer, the input gets replaced
+ // during bound check elimination with the index argument to the bounds
+ // check, which can be tagged, so that case must be handled here, too.
if (instr->hydrogen()->key()->representation().IsTagged()) {
__ SmiToInteger64(key_reg, key_reg);
} else if (instr->hydrogen()->IsDehoisted()) {
@@ -3930,66 +3878,6 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
__ movsd(double_store_operand, value);
}
-
-void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
- Register value = ToRegister(instr->value());
- Register elements = ToRegister(instr->elements());
- LOperand* key = instr->key();
- if (!key->IsConstantOperand()) {
- Register key_reg = ToRegister(key);
- // Even though the HLoad/StoreKeyedFastElement instructions force
- // the input representation for the key to be an integer, the
- // input gets replaced during bound check elimination with the index
- // argument to the bounds check, which can be tagged, so that case
- // must be handled here, too.
- if (instr->hydrogen()->key()->representation().IsTagged()) {
- __ SmiToInteger64(key_reg, key_reg);
- } else if (instr->hydrogen()->IsDehoisted()) {
- // Sign extend key because it could be a 32 bit negative value
- // and the dehoisted address computation happens in 64 bits
- __ movsxlq(key_reg, key_reg);
- }
- }
-
- Operand operand =
- BuildFastArrayOperand(instr->elements(),
- key,
- FAST_ELEMENTS,
- FixedArray::kHeaderSize - kHeapObjectTag,
- instr->additional_index());
-
- if (instr->hydrogen()->NeedsWriteBarrier()) {
- ASSERT(!instr->key()->IsConstantOperand());
- HType type = instr->hydrogen()->value()->type();
- SmiCheck check_needed =
- type.IsHeapObject() ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
- // Compute address of modified element and store it into key register.
- Register key_reg(ToRegister(key));
- __ lea(key_reg, operand);
- __ movq(Operand(key_reg, 0), value);
- __ RecordWrite(elements,
- key_reg,
- value,
- kSaveFPRegs,
- EMIT_REMEMBERED_SET,
- check_needed);
- } else {
- __ movq(operand, value);
- }
-}
-
-
-void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
- if (instr->is_external()) {
- DoStoreKeyedExternalArray(instr);
- } else if (instr->hydrogen()->value()->representation().IsDouble()) {
- DoStoreKeyedFixedDoubleArray(instr);
- } else {
- DoStoreKeyedFixedArray(instr);
- }
-}
-
-
void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
ASSERT(ToRegister(instr->object()).is(rdx));
ASSERT(ToRegister(instr->key()).is(rcx));
@@ -4649,7 +4537,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
- ASSERT(instr->temp()->Equals(instr->result()));
Register reg = ToRegister(instr->temp());
Handle<JSObject> holder = instr->holder();
diff --git a/deps/v8/src/x64/lithium-codegen-x64.h b/deps/v8/src/x64/lithium-codegen-x64.h
index e068f14b5..65b398016 100644
--- a/deps/v8/src/x64/lithium-codegen-x64.h
+++ b/deps/v8/src/x64/lithium-codegen-x64.h
@@ -335,12 +335,6 @@ class LCodeGen BASE_EMBEDDED {
};
void EnsureSpaceForLazyDeopt(int space_needed);
- void DoLoadKeyedExternalArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
- void DoLoadKeyedFixedArray(LLoadKeyed* instr);
- void DoStoreKeyedExternalArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
- void DoStoreKeyedFixedArray(LStoreKeyed* instr);
Zone* zone_;
LPlatformChunk* const chunk_;
diff --git a/deps/v8/src/x64/lithium-x64.cc b/deps/v8/src/x64/lithium-x64.cc
index 81228cef8..43fb8b9ba 100644
--- a/deps/v8/src/x64/lithium-x64.cc
+++ b/deps/v8/src/x64/lithium-x64.cc
@@ -179,7 +179,6 @@ const char* LArithmeticT::Mnemonic() const {
case Token::BIT_AND: return "bit-and-t";
case Token::BIT_OR: return "bit-or-t";
case Token::BIT_XOR: return "bit-xor-t";
- case Token::ROR: return "ror-t";
case Token::SHL: return "sal-t";
case Token::SAR: return "sar-t";
case Token::SHR: return "shr-t";
@@ -299,11 +298,6 @@ void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
}
-void LMathExp::PrintDataTo(StringStream* stream) {
- value()->PrintTo(stream);
-}
-
-
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@@ -400,27 +394,20 @@ void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
}
-void LLoadKeyed::PrintDataTo(StringStream* stream) {
- elements()->PrintTo(stream);
+void LStoreKeyedFastElement::PrintDataTo(StringStream* stream) {
+ object()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d]", additional_index());
- } else {
- stream->Add("]");
- }
+ stream->Add("] <- ");
+ value()->PrintTo(stream);
}
-void LStoreKeyed::PrintDataTo(StringStream* stream) {
+void LStoreKeyedFastDoubleElement::PrintDataTo(StringStream* stream) {
elements()->PrintTo(stream);
stream->Add("[");
key()->PrintTo(stream);
- if (hydrogen()->IsDehoisted()) {
- stream->Add(" + %d] <-", additional_index());
- } else {
- stream->Add("] <- ");
- }
+ stream->Add("] <- ");
value()->PrintTo(stream);
}
@@ -1051,14 +1038,6 @@ LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), xmm1);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
return MarkAsCall(DefineFixedDouble(result, xmm1), instr);
- } else if (op == kMathExp) {
- ASSERT(instr->representation().IsDouble());
- ASSERT(instr->value()->representation().IsDouble());
- LOperand* value = UseTempRegister(instr->value());
- LOperand* temp1 = TempRegister();
- LOperand* temp2 = TempRegister();
- LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
- return DefineAsRegister(result);
} else {
LOperand* input = UseRegisterAtStart(instr->value());
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input);
@@ -1130,11 +1109,6 @@ LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
}
-LInstruction* LChunkBuilder::DoRor(HRor* instr) {
- return DoShift(Token::ROR, instr);
-}
-
-
LInstruction* LChunkBuilder::DoShr(HShr* instr) {
return DoShift(Token::SHR, instr);
}
@@ -1185,13 +1159,6 @@ LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
if (instr->representation().IsDouble()) {
return DoArithmeticD(Token::DIV, instr);
} else if (instr->representation().IsInteger32()) {
- if (instr->HasPowerOf2Divisor()) {
- ASSERT(!instr->CheckFlag(HValue::kCanBeDivByZero));
- LOperand* value = UseRegisterAtStart(instr->left());
- LDivI* div =
- new(zone()) LDivI(value, UseOrConstant(instr->right()), NULL);
- return AssignEnvironment(DefineSameAsFirst(div));
- }
// The temporary operand is necessary to ensure that right is not allocated
// into rdx.
LOperand* temp = FixedTemp(rdx);
@@ -1425,7 +1392,7 @@ LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
LInstruction* LChunkBuilder::DoCompareIDAndBranch(
HCompareIDAndBranch* instr) {
- Representation r = instr->representation();
+ Representation r = instr->GetInputRepresentation();
if (r.IsInteger32()) {
ASSERT(instr->left()->representation().IsInteger32());
ASSERT(instr->right()->representation().IsInteger32());
@@ -1588,17 +1555,6 @@ LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
}
-LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
- LOperand* string = UseRegister(instr->string());
- LOperand* index = UseRegister(instr->index());
- ASSERT(rcx.is_byte_register());
- LOperand* value = UseFixed(instr->value(), rcx);
- LSeqStringSetChar* result =
- new(zone()) LSeqStringSetChar(instr->encoding(), string, index, value);
- return DefineSameAsFirst(result);
-}
-
-
LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
LOperand* value = UseRegisterOrConstantAtStart(instr->index());
LOperand* length = Use(instr->length());
@@ -1711,9 +1667,9 @@ LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
- LUnallocated* temp = TempRegister();
+ LOperand* temp = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp);
- return AssignEnvironment(Define(result, temp));
+ return AssignEnvironment(result);
}
@@ -1887,37 +1843,63 @@ LInstruction* LChunkBuilder::DoLoadExternalArrayPointer(
}
-LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+LInstruction* LChunkBuilder::DoLoadKeyedFastElement(
+ HLoadKeyedFastElement* instr) {
+ ASSERT(instr->representation().IsTagged());
ASSERT(instr->key()->representation().IsInteger32() ||
instr->key()->representation().IsTagged());
- ElementsKind elements_kind = instr->elements_kind();
+ LOperand* obj = UseRegisterAtStart(instr->object());
bool clobbers_key = instr->key()->representation().IsTagged();
LOperand* key = clobbers_key
? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LLoadKeyed* result = NULL;
+ LLoadKeyedFastElement* result =
+ new(zone()) LLoadKeyedFastElement(obj, key);
+ if (instr->RequiresHoleCheck()) AssignEnvironment(result);
+ return DefineAsRegister(result);
+}
- if (!instr->is_external()) {
- LOperand* obj = UseRegisterAtStart(instr->elements());
- result = new(zone()) LLoadKeyed(obj, key);
- } else {
- ASSERT(
- (instr->representation().IsInteger32() &&
- (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
- (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
- (instr->representation().IsDouble() &&
- ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- LOperand* external_pointer = UseRegister(instr->elements());
- result = new(zone()) LLoadKeyed(external_pointer, key);
- }
- DefineAsRegister(result);
- bool can_deoptimize = instr->RequiresHoleCheck() ||
- (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS);
+LInstruction* LChunkBuilder::DoLoadKeyedFastDoubleElement(
+ HLoadKeyedFastDoubleElement* instr) {
+ ASSERT(instr->representation().IsDouble());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedFastDoubleElement* result =
+ new(zone()) LLoadKeyedFastDoubleElement(elements, key);
+ return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedSpecializedArrayElement(
+ HLoadKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
+ ASSERT(
+ (instr->representation().IsInteger32() &&
+ (elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
+ (elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
+ (instr->representation().IsDouble() &&
+ ((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ LLoadKeyedSpecializedArrayElement* result =
+ new(zone()) LLoadKeyedSpecializedArrayElement(external_pointer, key);
+ LInstruction* load_instr = DefineAsRegister(result);
// An unsigned int array load might overflow and cause a deopt, so make
// sure it has an environment.
- return can_deoptimize ? AssignEnvironment(result) : result;
+ return (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) ?
+ AssignEnvironment(load_instr) : load_instr;
}
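
This is the only load in the builder given an environment, for the reason the comment states: optimized code models untagged integers as signed 32-bit, and a Uint32Array element at or above 2^31 has no such representation, so the load must be able to deoptimize. In C++ terms, on a two's-complement target:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t raw = 0x80000000u;  // a perfectly valid Uint32Array element
      int32_t as_int = static_cast<int32_t>(raw);  // the untagged int32 view
      std::printf("raw = %u, as int32 = %d\n", raw, as_int);
      if (as_int < 0)  // the testl / DeoptimizeIf(negative) pair above
        std::printf("not representable as int32 -> deoptimize\n");
      return 0;
    }
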
@@ -1930,52 +1912,71 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
}
-LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
- ElementsKind elements_kind = instr->elements_kind();
- bool clobbers_key = instr->key()->representation().IsTagged();
+LInstruction* LChunkBuilder::DoStoreKeyedFastElement(
+ HStoreKeyedFastElement* instr) {
+ bool needs_write_barrier = instr->NeedsWriteBarrier();
+ ASSERT(instr->value()->representation().IsTagged());
+ ASSERT(instr->object()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
- if (!instr->is_external()) {
- ASSERT(instr->elements()->representation().IsTagged());
- bool needs_write_barrier = instr->NeedsWriteBarrier();
- LOperand* object = NULL;
- LOperand* key = NULL;
- LOperand* val = NULL;
-
- if (instr->value()->representation().IsDouble()) {
- object = UseRegisterAtStart(instr->elements());
- val = UseTempRegister(instr->value());
- key = clobbers_key ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- } else {
- ASSERT(instr->value()->representation().IsTagged());
- object = UseTempRegister(instr->elements());
- val = needs_write_barrier ? UseTempRegister(instr->value())
- : UseRegisterAtStart(instr->value());
- key = (clobbers_key || needs_write_barrier)
- ? UseTempRegister(instr->key())
- : UseRegisterOrConstantAtStart(instr->key());
- }
+ LOperand* obj = UseTempRegister(instr->object());
+ LOperand* val = needs_write_barrier
+ ? UseTempRegister(instr->value())
+ : UseRegisterAtStart(instr->value());
+ bool clobbers_key = needs_write_barrier ||
+ instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastElement(obj, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedFastDoubleElement(
+ HStoreKeyedFastDoubleElement* instr) {
+ ASSERT(instr->value()->representation().IsDouble());
+ ASSERT(instr->elements()->representation().IsTagged());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* elements = UseRegisterAtStart(instr->elements());
+ LOperand* val = UseTempRegister(instr->value());
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
+ : UseRegisterOrConstantAtStart(instr->key());
+ return new(zone()) LStoreKeyedFastDoubleElement(elements, key, val);
+}
- return new(zone()) LStoreKeyed(object, key, val);
- }
+LInstruction* LChunkBuilder::DoStoreKeyedSpecializedArrayElement(
+ HStoreKeyedSpecializedArrayElement* instr) {
+ ElementsKind elements_kind = instr->elements_kind();
ASSERT(
(instr->value()->representation().IsInteger32() &&
(elements_kind != EXTERNAL_FLOAT_ELEMENTS) &&
(elements_kind != EXTERNAL_DOUBLE_ELEMENTS)) ||
(instr->value()->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
- (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
- ASSERT(instr->elements()->representation().IsExternal());
+ (elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
+ ASSERT(instr->external_pointer()->representation().IsExternal());
+ ASSERT(instr->key()->representation().IsInteger32() ||
+ instr->key()->representation().IsTagged());
+
+ LOperand* external_pointer = UseRegister(instr->external_pointer());
bool val_is_temp_register =
elements_kind == EXTERNAL_PIXEL_ELEMENTS ||
elements_kind == EXTERNAL_FLOAT_ELEMENTS;
- LOperand* val = val_is_temp_register ? UseTempRegister(instr->value())
+ LOperand* val = val_is_temp_register
+ ? UseTempRegister(instr->value())
: UseRegister(instr->value());
- LOperand* key = clobbers_key ? UseTempRegister(instr->key())
+ bool clobbers_key = instr->key()->representation().IsTagged();
+ LOperand* key = clobbers_key
+ ? UseTempRegister(instr->key())
: UseRegisterOrConstantAtStart(instr->key());
- LOperand* external_pointer = UseRegister(instr->elements());
- return new(zone()) LStoreKeyed(external_pointer, key, val);
+ return new(zone()) LStoreKeyedSpecializedArrayElement(external_pointer,
+ key, val);
}
@@ -2202,7 +2203,7 @@ LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
env->set_ast_id(instr->ast_id());
env->Drop(instr->pop_count());
- for (int i = instr->values()->length() - 1; i >= 0; --i) {
+ for (int i = 0; i < instr->values()->length(); ++i) {
HValue* value = instr->values()->at(i);
if (instr->HasAssignedIndexAt(i)) {
env->Bind(instr->GetAssignedIndexAt(i), value);
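
The loop-direction flip in DoSimulate above changes the order in which values land in the deoptimization environment: unassigned values are pushed in visit order, so walking values() forwards versus backwards yields reversed layouts. A trivial illustration of why the direction matters:

    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> values = {1, 2, 3};
      std::vector<int> env_fwd, env_rev;
      // Forward walk, as in the restored 3.14 code.
      for (size_t i = 0; i < values.size(); ++i) env_fwd.push_back(values[i]);
      // Backward walk, as in the 3.15 code being reverted.
      for (int i = static_cast<int>(values.size()) - 1; i >= 0; --i)
        env_rev.push_back(values[i]);
      std::printf("forward:  %d %d %d\n", env_fwd[0], env_fwd[1], env_fwd[2]);
      std::printf("backward: %d %d %d\n", env_rev[0], env_rev[1], env_rev[2]);
      return 0;
    }
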
diff --git a/deps/v8/src/x64/lithium-x64.h b/deps/v8/src/x64/lithium-x64.h
index 4a909a1f2..6cf4af661 100644
--- a/deps/v8/src/x64/lithium-x64.h
+++ b/deps/v8/src/x64/lithium-x64.h
@@ -126,12 +126,13 @@ class LCodeGen;
V(LoadFunctionPrototype) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
- V(LoadKeyed) \
+ V(LoadKeyedFastDoubleElement) \
+ V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
+ V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
- V(MathExp) \
V(MathFloorOfDiv) \
V(MathMinMax) \
V(ModI) \
@@ -149,7 +150,6 @@ class LCodeGen;
V(Random) \
V(RegExpLiteral) \
V(Return) \
- V(SeqStringSetChar) \
V(ShiftI) \
V(SmiTag) \
V(SmiUntag) \
@@ -157,8 +157,10 @@ class LCodeGen;
V(StoreContextSlot) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
- V(StoreKeyed) \
+ V(StoreKeyedFastDoubleElement) \
+ V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
+ V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
@@ -622,7 +624,7 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
Token::Value op() const { return hydrogen()->token(); }
bool is_double() const {
- return hydrogen()->representation().IsDouble();
+ return hydrogen()->GetInputRepresentation().IsDouble();
}
virtual void PrintDataTo(StringStream* stream);
@@ -645,25 +647,6 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 0> {
};
-class LMathExp: public LTemplateInstruction<1, 1, 2> {
- public:
- LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
- inputs_[0] = value;
- temps_[0] = temp1;
- temps_[1] = temp2;
- ExternalReference::InitializeMathExpData();
- }
-
- LOperand* value() { return inputs_[0]; }
- LOperand* temp1() { return temps_[0]; }
- LOperand* temp2() { return temps_[1]; }
-
- DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
-
- virtual void PrintDataTo(StringStream* stream);
-};
-
-
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
@@ -1156,30 +1139,6 @@ class LDateField: public LTemplateInstruction<1, 1, 0> {
};
-class LSeqStringSetChar: public LTemplateInstruction<1, 3, 0> {
- public:
- LSeqStringSetChar(String::Encoding encoding,
- LOperand* string,
- LOperand* index,
- LOperand* value) : encoding_(encoding) {
- inputs_[0] = string;
- inputs_[1] = index;
- inputs_[2] = value;
- }
-
- String::Encoding encoding() { return encoding_; }
- LOperand* string() { return inputs_[0]; }
- LOperand* index() { return inputs_[1]; }
- LOperand* value() { return inputs_[2]; }
-
- DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
- DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
-
- private:
- String::Encoding encoding_;
-};
-
-
class LThrow: public LTemplateInstruction<0, 1, 0> {
public:
explicit LThrow(LOperand* value) {
@@ -1394,26 +1353,56 @@ class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
};
-class LLoadKeyed: public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
public:
- LLoadKeyed(LOperand* elements, LOperand* key) {
+ LLoadKeyedFastElement(LOperand* elements, LOperand* key) {
inputs_[0] = elements;
inputs_[1] = key;
}
- DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
- DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastElement, "load-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastElement)
- bool is_external() const {
- return hydrogen()->is_external();
+ LOperand* elements() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedFastDoubleElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedFastDoubleElement(LOperand* elements, LOperand* key) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
}
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedFastDoubleElement,
+ "load-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedFastDoubleElement)
+
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
- virtual void PrintDataTo(StringStream* stream);
uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
+ public:
+ LLoadKeyedSpecializedArrayElement(LOperand* external_pointer, LOperand* key) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ }
+
+ DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
+ "load-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
@@ -1910,29 +1899,76 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
};
-class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyedFastElement: public LTemplateInstruction<0, 3, 0> {
public:
- LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+ LStoreKeyedFastElement(LOperand* object, LOperand* key, LOperand* value) {
inputs_[0] = object;
inputs_[1] = key;
inputs_[2] = value;
}
- bool is_external() const { return hydrogen()->is_external(); }
+ LOperand* object() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastElement,
+ "store-keyed-fast-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastElement)
+
+ virtual void PrintDataTo(StringStream* stream);
+
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
+class LStoreKeyedFastDoubleElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedFastDoubleElement(LOperand* elements,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = elements;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
LOperand* elements() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
- ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
- DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
- DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedFastDoubleElement,
+ "store-keyed-fast-double-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedFastDoubleElement)
virtual void PrintDataTo(StringStream* stream);
+
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
+class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
+ public:
+ LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
+ LOperand* key,
+ LOperand* value) {
+ inputs_[0] = external_pointer;
+ inputs_[1] = key;
+ inputs_[2] = value;
+ }
+
+ LOperand* external_pointer() { return inputs_[0]; }
+ LOperand* key() { return inputs_[1]; }
+ LOperand* value() { return inputs_[2]; }
+
+ DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
+ "store-keyed-specialized-array-element")
+ DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
+
+ ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+ uint32_t additional_index() const { return hydrogen()->index_offset(); }
+};
+
+
class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedGeneric(LOperand* object, LOperand* key, LOperand* value) {
@@ -2074,7 +2110,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
-class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 1> {
+class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 1> {
public:
explicit LCheckPrototypeMaps(LOperand* temp) {
temps_[0] = temp;
diff --git a/deps/v8/src/x64/macro-assembler-x64.cc b/deps/v8/src/x64/macro-assembler-x64.cc
index 4e4f2c572..77506741a 100644
--- a/deps/v8/src/x64/macro-assembler-x64.cc
+++ b/deps/v8/src/x64/macro-assembler-x64.cc
@@ -720,28 +720,11 @@ void MacroAssembler::CallApiFunctionAndReturn(Address function_address,
movq(prev_next_address_reg, Operand(base_reg, kNextOffset));
movq(prev_limit_reg, Operand(base_reg, kLimitOffset));
addl(Operand(base_reg, kLevelOffset), Immediate(1));
-
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_enter_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
// Call the api function!
movq(rax, reinterpret_cast<int64_t>(function_address),
RelocInfo::RUNTIME_ENTRY);
call(rax);
- if (FLAG_log_timer_events) {
- FrameScope frame(this, StackFrame::MANUAL);
- PushSafepointRegisters();
- PrepareCallCFunction(0);
- CallCFunction(ExternalReference::log_leave_external_function(isolate()), 0);
- PopSafepointRegisters();
- }
-
#if defined(_WIN64) && !defined(__MINGW64__)
// rax keeps a pointer to v8::Handle, unpack it.
movq(rax, Operand(rax, 0));
@@ -2218,19 +2201,16 @@ void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
const int kFlatAsciiStringMask =
- kIsNotStringMask | kStringEncodingMask | kAsciiDataHintMask |
- kStringRepresentationMask;
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
- shl(scratch1, Immediate(8));
- orl(scratch1, scratch2);
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
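
The lea-based sequence above folds two masked type checks into one compare: the new ASSERT guarantees the mask and the mask shifted left by three share no bits, so scratch1 + 8 * scratch2 packs both masked instance types into a single register without carries between the fields. The same arithmetic with illustrative constants:

    #include <cassert>
    #include <cstdio>

    int main() {
      // Illustrative stand-ins for kFlatAsciiStringMask / ASCII_STRING_TYPE.
      const int kMask = 0x87;
      const int kFlatAsciiTag = 0x04;
      // The invariant the ASSERT encodes: mask and mask << 3 are disjoint,
      // so the addition below can never carry between the two packed fields.
      assert((kMask & (kMask << 3)) == 0);
      int type1 = 0x04, type2 = 0x04;  // two sequential ASCII instance types
      int combined = (type1 & kMask) + ((type2 & kMask) << 3);  // the lea trick
      bool both = (combined == kFlatAsciiTag + (kFlatAsciiTag << 3));
      std::printf("both flat ASCII: %d\n", both);
      return 0;
    }
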
@@ -2248,7 +2228,7 @@ void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
andl(scratch, Immediate(kFlatAsciiStringMask));
- cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
+ cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kAsciiStringTag));
j(not_equal, failure, near_jump);
}
@@ -2266,19 +2246,17 @@ void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
// Check that both are flat ASCII strings.
ASSERT(kNotStringTag != 0);
- const int kFlatAsciiStringMask = kIsNotStringMask | kStringRepresentationMask
- | kStringEncodingMask | kAsciiDataHintTag;
+ const int kFlatAsciiStringMask =
+ kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
andl(scratch1, Immediate(kFlatAsciiStringMask));
andl(scratch2, Immediate(kFlatAsciiStringMask));
// Interleave the bits to check both scratch1 and scratch2 in one test.
- ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 8));
- ASSERT_EQ(ASCII_STRING_TYPE, ASCII_STRING_TYPE & kFlatAsciiStringMask);
- shl(scratch1, Immediate(8));
- orl(scratch1, scratch2);
+ ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+ lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
cmpl(scratch1,
- Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 8)));
+ Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
j(not_equal, on_fail, near_jump);
}
@@ -2791,8 +2769,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset) {
+ Label* fail) {
Label smi_value, is_nan, maybe_nan, not_nan, have_double_value, done;
JumpIfSmi(maybe_number, &smi_value, Label::kNear);
@@ -2811,8 +2788,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
bind(&not_nan);
movsd(xmm_scratch, FieldOperand(maybe_number, HeapNumber::kValueOffset));
bind(&have_double_value);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
xmm_scratch);
jmp(&done);
@@ -2835,8 +2811,7 @@ void MacroAssembler::StoreNumberToDoubleElements(
// Preserve original value.
SmiToInteger32(kScratchRegister, maybe_number);
cvtlsi2sd(xmm_scratch, kScratchRegister);
- movsd(FieldOperand(elements, index, times_8,
- FixedDoubleArray::kHeaderSize - elements_offset),
+ movsd(FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize),
xmm_scratch);
bind(&done);
}
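
With the elements_offset parameter dropped, both stores above target the same address form. A hedged sketch of that address arithmetic (the tag and header constants are illustrative x64 values, not taken from the headers):

#include <cstdint>

// Byte address of element `index` in a FixedDoubleArray, as computed by
// FieldOperand(elements, index, times_8, FixedDoubleArray::kHeaderSize).
uintptr_t DoubleElementAddress(uintptr_t elements_tagged, int index) {
  const int kHeapObjectTag = 1;   // heap pointers carry a 1-bit tag (assumed)
  const int kHeaderSize = 16;     // map pointer + length on x64 (assumed)
  return elements_tagged - kHeapObjectTag + kHeaderSize +
         static_cast<uintptr_t>(index) * sizeof(double);
}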
@@ -3982,7 +3957,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
Label* gc_required) {
// Calculate the number of bytes needed for the characters in the string while
// observing object alignment.
- const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
+ const int kHeaderAlignment = SeqAsciiString::kHeaderSize &
kObjectAlignmentMask;
movl(scratch1, length);
ASSERT(kCharSize == 1);
@@ -3993,7 +3968,7 @@ void MacroAssembler::AllocateAsciiString(Register result,
}
// Allocate ASCII string in new space.
- AllocateInNewSpace(SeqOneByteString::kHeaderSize,
+ AllocateInNewSpace(SeqAsciiString::kHeaderSize,
times_1,
scratch1,
result,
@@ -4528,7 +4503,7 @@ void MacroAssembler::EnsureNotWhite(
bind(&not_external);
// Sequential string, either ASCII or UC16.
- ASSERT(kOneByteStringTag == 0x04);
+ ASSERT(kAsciiStringTag == 0x04);
and_(length, Immediate(kStringEncodingMask));
xor_(length, Immediate(kStringEncodingMask));
addq(length, Immediate(0x04));
diff --git a/deps/v8/src/x64/macro-assembler-x64.h b/deps/v8/src/x64/macro-assembler-x64.h
index 0d8d6f2cc..cc057ac54 100644
--- a/deps/v8/src/x64/macro-assembler-x64.h
+++ b/deps/v8/src/x64/macro-assembler-x64.h
@@ -895,8 +895,7 @@ class MacroAssembler: public Assembler {
Register elements,
Register index,
XMMRegister xmm_scratch,
- Label* fail,
- int elements_offset = 0);
+ Label* fail);
// Compare an object's map with the specified map and its transitioned
// elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. FLAGS are set with
diff --git a/deps/v8/src/x64/regexp-macro-assembler-x64.cc b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
index 6cb87e899..86f7bfe6c 100644
--- a/deps/v8/src/x64/regexp-macro-assembler-x64.cc
+++ b/deps/v8/src/x64/regexp-macro-assembler-x64.cc
@@ -1305,7 +1305,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
- bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+ bool is_ascii = subject->IsAsciiRepresentationUnderneath();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@@ -1336,7 +1336,7 @@ int RegExpMacroAssemblerX64::CheckStackGuardState(Address* return_address,
}
// String might have changed.
- if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
+ if (subject_tmp->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and a UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).
diff --git a/deps/v8/src/x64/stub-cache-x64.cc b/deps/v8/src/x64/stub-cache-x64.cc
index 683aa9d40..cd71086ee 100644
--- a/deps/v8/src/x64/stub-cache-x64.cc
+++ b/deps/v8/src/x64/stub-cache-x64.cc
@@ -350,23 +350,18 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
Register dst,
Register src,
Handle<JSObject> holder,
- PropertyIndex index) {
- if (index.is_header_index()) {
- int offset = index.header_index() * kPointerSize;
+ int index) {
+ // Adjust for the number of properties stored in the holder.
+ index -= holder->map()->inobject_properties();
+ if (index < 0) {
+ // Get the property straight out of the holder.
+ int offset = holder->map()->instance_size() + (index * kPointerSize);
__ movq(dst, FieldOperand(src, offset));
} else {
- // Adjust for the number of properties stored in the holder.
- int slot = index.field_index() - holder->map()->inobject_properties();
- if (slot < 0) {
- // Get the property straight out of the holder.
- int offset = holder->map()->instance_size() + (slot * kPointerSize);
- __ movq(dst, FieldOperand(src, offset));
- } else {
- // Calculate the offset into the properties array.
- int offset = slot * kPointerSize + FixedArray::kHeaderSize;
- __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
- __ movq(dst, FieldOperand(dst, offset));
- }
+ // Calculate the offset into the properties array.
+ int offset = index * kPointerSize + FixedArray::kHeaderSize;
+ __ movq(dst, FieldOperand(src, JSObject::kPropertiesOffset));
+ __ movq(dst, FieldOperand(dst, offset));
}
}
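
The restored int-index form above splits a raw field index into the in-object and out-of-object cases. A minimal sketch of the same arithmetic (8-byte pointers and a 16-byte FixedArray header assumed, for illustration only):

// Returns the byte offset to load from, and whether the load must first
// follow the pointer at JSObject::kPropertiesOffset.
int FieldOffset(int index, int inobject_properties, int instance_size,
                bool* in_properties_array) {
  index -= inobject_properties;
  if (index < 0) {
    // In-object field: a negative slot counted back from instance_size.
    *in_properties_array = false;
    return instance_size + index * 8;
  }
  // Out-of-object field: stored in the separate properties FixedArray.
  *in_properties_array = true;
  return index * 8 + 16;  // 16 == FixedArray header size (assumed)
}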
@@ -1018,7 +1013,7 @@ void StubCompiler::GenerateLoadField(Handle<JSObject> object,
Register scratch1,
Register scratch2,
Register scratch3,
- PropertyIndex index,
+ int index,
Handle<String> name,
Label* miss) {
// Check that the receiver isn't a smi.
@@ -1393,7 +1388,7 @@ void CallStubCompiler::GenerateMissBranch() {
Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// rcx : function name
@@ -1487,7 +1482,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
Label call_builtin;
if (argc == 1) { // Otherwise fall through to call builtin.
- Label attempt_to_grow_elements, with_write_barrier, check_double;
+ Label attempt_to_grow_elements, with_write_barrier;
// Get the elements array of the object.
__ movq(rdi, FieldOperand(rdx, JSArray::kElementsOffset));
@@ -1495,7 +1490,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// Check that the elements are in fast mode and writable.
__ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
factory()->fixed_array_map());
- __ j(not_equal, &check_double);
+ __ j(not_equal, &call_builtin);
// Get the array's length into rax and calculate new length.
__ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
@@ -1526,34 +1521,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ Integer32ToSmi(rax, rax); // Return new length as smi.
__ ret((argc + 1) * kPointerSize);
- __ bind(&check_double);
-
- // Check that the elements are in double mode.
- __ Cmp(FieldOperand(rdi, HeapObject::kMapOffset),
- factory()->fixed_double_array_map());
- __ j(not_equal, &call_builtin);
-
- // Get the array's length into rax and calculate new length.
- __ SmiToInteger32(rax, FieldOperand(rdx, JSArray::kLengthOffset));
- STATIC_ASSERT(FixedArray::kMaxLength < Smi::kMaxValue);
- __ addl(rax, Immediate(argc));
-
- // Get the elements' length into rcx.
- __ SmiToInteger32(rcx, FieldOperand(rdi, FixedArray::kLengthOffset));
-
- // Check if we could survive without allocation.
- __ cmpl(rax, rcx);
- __ j(greater, &call_builtin);
-
- __ movq(rcx, Operand(rsp, argc * kPointerSize));
- __ StoreNumberToDoubleElements(
- rcx, rdi, rax, xmm0, &call_builtin, argc * kDoubleSize);
-
- // Save new length.
- __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rax);
- __ Integer32ToSmi(rax, rax); // Return new length as smi.
- __ ret((argc + 1) * kPointerSize);
-
__ bind(&with_write_barrier);
__ movq(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
@@ -1565,9 +1532,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
// In case of fast smi-only, convert to fast object, otherwise bail out.
__ bind(&not_fast_object);
__ CheckFastSmiElements(rbx, &call_builtin);
- __ Cmp(FieldOperand(rcx, HeapObject::kMapOffset),
- factory()->heap_number_map());
- __ j(equal, &call_builtin);
// rdx: receiver
// rbx: map
@@ -2816,7 +2780,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<String> name,
Handle<Code> LoadStubCompiler::CompileLoadField(Handle<JSObject> object,
Handle<JSObject> holder,
- PropertyIndex index,
+ int index,
Handle<String> name) {
// ----------- S t a t e -------------
// -- rax : receiver
@@ -3009,7 +2973,7 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
Handle<Code> KeyedLoadStubCompiler::CompileLoadField(Handle<String> name,
Handle<JSObject> receiver,
Handle<JSObject> holder,
- PropertyIndex index) {
+ int index) {
// ----------- S t a t e -------------
// -- rax : key
// -- rdx : receiver
@@ -3276,7 +3240,6 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#endif
// Load the initial map and verify that it is in fact a map.
- // rdi: constructor
__ movq(rbx, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
STATIC_ASSERT(kSmiTag == 0);
@@ -3286,22 +3249,18 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
#ifdef DEBUG
// Cannot construct functions this way.
+ // rdi: constructor
// rbx: initial map
__ CmpInstanceType(rbx, JS_FUNCTION_TYPE);
- __ Check(not_equal, "Function constructed by construct stub.");
+ __ Assert(not_equal, "Function constructed by construct stub.");
#endif
// Now allocate the JSObject in new space.
+ // rdi: constructor
// rbx: initial map
- ASSERT(function->has_initial_map());
- int instance_size = function->initial_map()->instance_size();
-#ifdef DEBUG
__ movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
__ shl(rcx, Immediate(kPointerSizeLog2));
- __ cmpq(rcx, Immediate(instance_size));
- __ Check(equal, "Instance size of initial map changed.");
-#endif
- __ AllocateInNewSpace(instance_size, rdx, rcx, no_reg,
+ __ AllocateInNewSpace(rcx, rdx, rcx, no_reg,
&generic_stub_call, NO_ALLOCATION_FLAGS);
// Allocated the JSObject, now initialize the fields and add the heap tag.
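
Instead of baking the compile-time instance_size into the stub, the restored code reads the size byte out of the map at runtime and scales it to bytes before allocating. A sketch of that scaling, assuming the map stores the size in pointer-sized words and kPointerSizeLog2 == 3 on x64:

#include <cstdint>

// movzxbq(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
// shl(rcx, Immediate(kPointerSizeLog2));
uint32_t InstanceSizeInBytes(uint8_t size_in_words) {
  const int kPointerSizeLog2 = 3;  // 8-byte pointers on x64 (assumed)
  return static_cast<uint32_t>(size_in_words) << kPointerSizeLog2;
}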
@@ -3347,6 +3306,7 @@ Handle<Code> ConstructStubCompiler::CompileConstructStub(
}
// Fill the unused in-object property fields with undefined.
+ ASSERT(function->has_initial_map());
for (int i = shared->this_property_assignments_count();
i < function->initial_map()->inobject_properties();
i++) {
@@ -4026,7 +3986,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// -- rsp[0] : return address
// -----------------------------------
Label miss_force_generic, transition_elements_kind, finish_store;
- Label grow, slow, check_capacity, restore_key_transition_elements_kind;
+ Label grow, slow, check_capacity;
// This stub is meant to be tail-jumped to; the receiver must already
have been verified by the caller to not be a smi.
@@ -4055,7 +4015,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ bind(&finish_store);
__ SmiToInteger32(rcx, rcx);
__ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
+ &transition_elements_kind);
__ ret(0);
// Handle store cache miss, replacing the ic with the generic stub.
@@ -4064,10 +4024,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
__ jmp(ic_force_generic, RelocInfo::CODE_TARGET);
- __ bind(&restore_key_transition_elements_kind);
+ __ bind(&transition_elements_kind);
// Restore smi-tagging of rcx.
__ Integer32ToSmi(rcx, rcx);
- __ bind(&transition_elements_kind);
Handle<Code> ic_miss = masm->isolate()->builtins()->KeyedStoreIC_Miss();
__ jmp(ic_miss, RelocInfo::CODE_TARGET);
@@ -4108,16 +4067,6 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
__ Move(FieldOperand(rdi, FixedDoubleArray::kLengthOffset),
Smi::FromInt(JSArray::kPreallocatedArrayElements));
- // Increment the length of the array.
- __ SmiToInteger32(rcx, rcx);
- __ StoreNumberToDoubleElements(rax, rdi, rcx, xmm0,
- &restore_key_transition_elements_kind);
-
- __ movq(r8, BitCast<int64_t, uint64_t>(kHoleNanInt64), RelocInfo::NONE);
- for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
- __ movq(FieldOperand(rdi, FixedDoubleArray::OffsetOfElementAt(i)), r8);
- }
-
// Install the new backing store in the JSArray.
__ movq(FieldOperand(rdx, JSObject::kElementsOffset), rdi);
__ RecordWriteField(rdx, JSObject::kElementsOffset, rdi, rbx,
@@ -4126,7 +4075,7 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// Increment the length of the array.
__ Move(FieldOperand(rdx, JSArray::kLengthOffset), Smi::FromInt(1));
__ movq(rdi, FieldOperand(rdx, JSObject::kElementsOffset));
- __ ret(0);
+ __ jmp(&finish_store);
__ bind(&check_capacity);
// rax: value
diff --git a/deps/v8/test/cctest/cctest.gyp b/deps/v8/test/cctest/cctest.gyp
index 80eecfd03..66d848c5c 100644
--- a/deps/v8/test/cctest/cctest.gyp
+++ b/deps/v8/test/cctest/cctest.gyp
@@ -79,7 +79,6 @@
'test-lockers.cc',
'test-log.cc',
'test-mark-compact.cc',
- 'test-object-observe.cc',
'test-parsing.cc',
'test-platform-tls.cc',
'test-profile-generator.cc',
diff --git a/deps/v8/test/cctest/cctest.h b/deps/v8/test/cctest/cctest.h
index 88cb9b8c5..f3961a4ad 100644
--- a/deps/v8/test/cctest/cctest.h
+++ b/deps/v8/test/cctest/cctest.h
@@ -233,24 +233,4 @@ static inline int FlagDependentPortOffset() {
}
-// Helper function that simulates a full new-space in the heap.
-static inline void SimulateFullSpace(v8::internal::NewSpace* space) {
- int new_linear_size = static_cast<int>(
- *space->allocation_limit_address() - *space->allocation_top_address());
- v8::internal::MaybeObject* maybe = space->AllocateRaw(new_linear_size);
- v8::internal::FreeListNode* node = v8::internal::FreeListNode::cast(maybe);
- node->set_size(space->heap(), new_linear_size);
-}
-
-
-// Helper function that simulates a full old-space in the heap.
-static inline void SimulateFullSpace(v8::internal::PagedSpace* space) {
- int old_linear_size = static_cast<int>(space->limit() - space->top());
- space->Free(space->top(), old_linear_size);
- space->SetTop(space->limit(), space->limit());
- space->ResetFreeList();
- space->ClearStats();
-}
-
-
#endif // ifndef CCTEST_H_
diff --git a/deps/v8/test/cctest/test-accessors.cc b/deps/v8/test/cctest/test-accessors.cc
index d44503534..0b342ff3d 100644
--- a/deps/v8/test/cctest/test-accessors.cc
+++ b/deps/v8/test/cctest/test-accessors.cc
@@ -453,29 +453,3 @@ THREADED_TEST(HandleScopeSegment) {
"result;"))->Run();
CHECK_EQ(100, result->Int32Value());
}
-
-
-v8::Handle<v8::Array> JSONStringifyEnumerator(const AccessorInfo& info) {
- v8::Handle<v8::Array> array = v8::Array::New(1);
- array->Set(0, v8_str("regress"));
- return array;
-}
-
-
-v8::Handle<v8::Value> JSONStringifyGetter(Local<String> name,
- const AccessorInfo& info) {
- return v8_str("crbug-161028");
-}
-
-
-THREADED_TEST(JSONStringifyNamedInterceptorObject) {
- v8::HandleScope scope;
- LocalContext env;
-
- v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New();
- obj->SetNamedPropertyHandler(
- JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator);
- env->Global()->Set(v8_str("obj"), obj->NewInstance());
- v8::Handle<v8::String> expected = v8_str("{\"regress\":\"crbug-161028\"}");
- CHECK(CompileRun("JSON.stringify(obj)")->Equals(expected));
-}
diff --git a/deps/v8/test/cctest/test-alloc.cc b/deps/v8/test/cctest/test-alloc.cc
index bbae5ebd3..7ba2583da 100644
--- a/deps/v8/test/cctest/test-alloc.cc
+++ b/deps/v8/test/cctest/test-alloc.cc
@@ -34,13 +34,34 @@
using namespace v8::internal;
+// Also used in test-heap.cc test cases.
+void SimulateFullSpace(PagedSpace* space) {
+ int old_linear_size = static_cast<int>(space->limit() - space->top());
+ space->Free(space->top(), old_linear_size);
+ space->SetTop(space->limit(), space->limit());
+ space->ResetFreeList();
+ space->ClearStats();
+}
+
+
static MaybeObject* AllocateAfterFailures() {
static int attempts = 0;
if (++attempts < 3) return Failure::RetryAfterGC();
Heap* heap = Isolate::Current()->heap();
// New space.
- SimulateFullSpace(heap->new_space());
+ NewSpace* new_space = heap->new_space();
+ static const int kNewSpaceFillerSize = ByteArray::SizeFor(0);
+ while (new_space->Available() > kNewSpaceFillerSize) {
+ int available_before = static_cast<int>(new_space->Available());
+ CHECK(!heap->AllocateByteArray(0)->IsFailure());
+ if (available_before == new_space->Available()) {
+ // It seems that we are avoiding new space allocations when
+ // allocation is forced, so no need to fill up new space
+ // in order to make the test harder.
+ break;
+ }
+ }
CHECK(!heap->AllocateByteArray(100)->IsFailure());
CHECK(!heap->AllocateFixedArray(100, NOT_TENURED)->IsFailure());
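
The loop above fills new space by allocating the smallest possible objects until the linear area cannot hold another one, bailing out if forced allocation stops making progress. A hedged standalone sketch of the idiom (the types are the V8-internal ones the test already uses):

// Fill new space with zero-length ByteArrays; stop early if a forced
// allocation no longer consumes linear space (e.g. it was redirected).
void FillNewSpace(Heap* heap, NewSpace* new_space) {
  const int kFillerSize = ByteArray::SizeFor(0);  // minimal allocation unit
  while (new_space->Available() > kFillerSize) {
    int before = static_cast<int>(new_space->Available());
    CHECK(!heap->AllocateByteArray(0)->IsFailure());
    if (before == new_space->Available()) break;  // no progress, stop
  }
}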
@@ -55,7 +76,7 @@ static MaybeObject* AllocateAfterFailures() {
// Old data space.
SimulateFullSpace(heap->old_data_space());
- CHECK(!heap->AllocateRawOneByteString(100, TENURED)->IsFailure());
+ CHECK(!heap->AllocateRawAsciiString(100, TENURED)->IsFailure());
// Old pointer space.
SimulateFullSpace(heap->old_pointer_space());
@@ -79,7 +100,6 @@ static MaybeObject* AllocateAfterFailures() {
CHECK(!heap->AllocateMap(JS_OBJECT_TYPE, instance_size)->IsFailure());
// Test that we can allocate in old pointer space and code space.
- SimulateFullSpace(heap->code_space());
CHECK(!heap->AllocateFixedArray(100, TENURED)->IsFailure());
CHECK(!heap->CopyCode(Isolate::Current()->builtins()->builtin(
Builtins::kIllegal))->IsFailure());
diff --git a/deps/v8/test/cctest/test-api.cc b/deps/v8/test/cctest/test-api.cc
index 0a5583bb9..3be068009 100644
--- a/deps/v8/test/cctest/test-api.cc
+++ b/deps/v8/test/cctest/test-api.cc
@@ -25,9 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// We want to test our deprecated API entries, too.
-#define V8_DISABLE_DEPRECATIONS 1
-
#include <limits.h>
#ifndef WIN32
@@ -626,8 +623,6 @@ THREADED_TEST(UsingExternalAsciiString) {
THREADED_TEST(ScavengeExternalString) {
- i::FLAG_stress_compaction = false;
- i::FLAG_gc_global = false;
int dispose_count = 0;
bool in_new_space = false;
{
@@ -648,8 +643,6 @@ THREADED_TEST(ScavengeExternalString) {
THREADED_TEST(ScavengeExternalAsciiString) {
- i::FLAG_stress_compaction = false;
- i::FLAG_gc_global = false;
int dispose_count = 0;
bool in_new_space = false;
{
@@ -2063,99 +2056,6 @@ THREADED_TEST(InternalFieldsNativePointersAndExternal) {
}
-static void CheckAlignedPointerInInternalField(Handle<v8::Object> obj,
- void* value) {
- CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
- obj->SetPointerInInternalField(0, value);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(value, obj->GetPointerFromInternalField(0));
-}
-
-
-THREADED_TEST(InternalFieldsAlignedPointers) {
- v8::HandleScope scope;
- LocalContext env;
-
- Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New();
- Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
- instance_templ->SetInternalFieldCount(1);
- Local<v8::Object> obj = templ->GetFunction()->NewInstance();
- CHECK_EQ(1, obj->InternalFieldCount());
-
- CheckAlignedPointerInInternalField(obj, NULL);
-
- int* heap_allocated = new int[100];
- CheckAlignedPointerInInternalField(obj, heap_allocated);
- delete[] heap_allocated;
-
- int stack_allocated[100];
- CheckAlignedPointerInInternalField(obj, stack_allocated);
-
- void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
- CheckAlignedPointerInInternalField(obj, huge);
-}
-
-
-static void CheckAlignedPointerInEmbedderData(LocalContext* env,
- int index,
- void* value) {
- CHECK_EQ(0, static_cast<int>(reinterpret_cast<uintptr_t>(value) & 0x1));
- (*env)->SetAlignedPointerInEmbedderData(index, value);
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- CHECK_EQ(value, (*env)->GetAlignedPointerFromEmbedderData(index));
-}
-
-
-static void* AlignedTestPointer(int i) {
- return reinterpret_cast<void*>(i * 1234);
-}
-
-
-THREADED_TEST(EmbedderDataAlignedPointers) {
- v8::HandleScope scope;
- LocalContext env;
-
- CheckAlignedPointerInEmbedderData(&env, 0, NULL);
-
- int* heap_allocated = new int[100];
- CheckAlignedPointerInEmbedderData(&env, 1, heap_allocated);
- delete[] heap_allocated;
-
- int stack_allocated[100];
- CheckAlignedPointerInEmbedderData(&env, 2, stack_allocated);
-
- void* huge = reinterpret_cast<void*>(~static_cast<uintptr_t>(1));
- CheckAlignedPointerInEmbedderData(&env, 3, huge);
-
- // Test growing of the embedder data's backing store.
- for (int i = 0; i < 100; i++) {
- env->SetAlignedPointerInEmbedderData(i, AlignedTestPointer(i));
- }
- HEAP->CollectAllGarbage(i::Heap::kNoGCFlags);
- for (int i = 0; i < 100; i++) {
- CHECK_EQ(AlignedTestPointer(i), env->GetAlignedPointerFromEmbedderData(i));
- }
-}
-
-
-static void CheckEmbedderData(LocalContext* env,
- int index,
- v8::Handle<Value> data) {
- (*env)->SetEmbedderData(index, data);
- CHECK((*env)->GetEmbedderData(index)->StrictEquals(data));
-}
-
-THREADED_TEST(EmbedderData) {
- v8::HandleScope scope;
- LocalContext env;
-
- CheckEmbedderData(&env, 3, v8::String::New("The quick brown fox jumps"));
- CheckEmbedderData(&env, 2, v8::String::New("over the lazy dog."));
- CheckEmbedderData(&env, 1, v8::Number::New(1.2345));
- CheckEmbedderData(&env, 0, v8::Boolean::New(true));
-}
-
-
THREADED_TEST(IdentityHash) {
v8::HandleScope scope;
LocalContext env;
@@ -2349,24 +2249,6 @@ THREADED_TEST(GlobalHandle) {
}
CHECK_EQ(global->Length(), 3);
global.Dispose();
-
- {
- v8::HandleScope scope;
- Local<String> str = v8_str("str");
- global = v8::Persistent<String>::New(str);
- }
- CHECK_EQ(global->Length(), 3);
- global.Dispose(v8::Isolate::GetCurrent());
-}
-
-
-THREADED_TEST(LocalHandle) {
- v8::HandleScope scope;
- v8::Local<String> local = v8::Local<String>::New(v8_str("str"));
- CHECK_EQ(local->Length(), 3);
-
- local = v8::Local<String>::New(v8::Isolate::GetCurrent(), v8_str("str"));
- CHECK_EQ(local->Length(), 3);
}
@@ -2487,41 +2369,23 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Object> g2s2;
Persistent<Object> g3s1;
Persistent<Object> g3s2;
- Persistent<Object> g4s1;
- Persistent<Object> g4s2;
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
{
HandleScope scope;
g1s1 = Persistent<Object>::New(Object::New());
g1s2 = Persistent<Object>::New(Object::New());
g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- CHECK(g1s1.IsWeak());
- CHECK(g1s2.IsWeak());
g2s1 = Persistent<Object>::New(Object::New());
g2s2 = Persistent<Object>::New(Object::New());
g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- CHECK(g2s1.IsWeak());
- CHECK(g2s2.IsWeak());
g3s1 = Persistent<Object>::New(Object::New());
g3s2 = Persistent<Object>::New(Object::New());
g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- CHECK(g3s1.IsWeak());
- CHECK(g3s2.IsWeak());
-
- g4s1 = Persistent<Object>::New(Object::New());
- g4s2 = Persistent<Object>::New(Object::New());
- g4s1.MakeWeak(isolate,
- reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g4s2.MakeWeak(isolate,
- reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- CHECK(g4s1.IsWeak(isolate));
- CHECK(g4s2.IsWeak(isolate));
}
Persistent<Object> root = Persistent<Object>::New(g1s1); // make a root.
@@ -2535,17 +2399,13 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g2_children[] = { g3s1 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g4s1 };
- Persistent<Value> g4_objects[] = { g4s1, g4s2 };
- Persistent<Value> g4_children[] = { g1s1 };
+ Persistent<Value> g3_children[] = { g1s1 };
V8::AddObjectGroup(g1_objects, 2);
V8::AddImplicitReferences(g1s1, g1_children, 1);
V8::AddObjectGroup(g2_objects, 2);
V8::AddImplicitReferences(g2s1, g2_children, 1);
V8::AddObjectGroup(g3_objects, 2);
V8::AddImplicitReferences(g3s1, g3_children, 1);
- V8::AddObjectGroup(isolate, g4_objects, 2);
- V8::AddImplicitReferences(g4s1, g4_children, 1);
}
// Do a single full GC
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
@@ -2563,117 +2423,17 @@ THREADED_TEST(ApiObjectGroupsCycle) {
Persistent<Value> g2_objects[] = { g2s1, g2s2 };
Persistent<Value> g2_children[] = { g3s1 };
Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- Persistent<Value> g3_children[] = { g4s1 };
- Persistent<Value> g4_objects[] = { g4s1, g4s2 };
- Persistent<Value> g4_children[] = { g1s1 };
+ Persistent<Value> g3_children[] = { g1s1 };
V8::AddObjectGroup(g1_objects, 2);
V8::AddImplicitReferences(g1s1, g1_children, 1);
V8::AddObjectGroup(g2_objects, 2);
V8::AddImplicitReferences(g2s1, g2_children, 1);
V8::AddObjectGroup(g3_objects, 2);
V8::AddImplicitReferences(g3s1, g3_children, 1);
- V8::AddObjectGroup(g4_objects, 2);
- V8::AddImplicitReferences(g4s1, g4_children, 1);
}
HEAP->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
- // All objects should be gone. 9 global handles in total.
- CHECK_EQ(9, counter.NumberOfWeakCalls());
-}
-
-
-// TODO(mstarzinger): This should be a THREADED_TEST but causes failures
-// on the buildbots, so was made non-threaded for the time being.
-TEST(ApiObjectGroupsCycleForScavenger) {
- i::FLAG_stress_compaction = false;
- i::FLAG_gc_global = false;
- HandleScope scope;
- LocalContext env;
-
- WeakCallCounter counter(1234);
-
- Persistent<Object> g1s1;
- Persistent<Object> g1s2;
- Persistent<Object> g2s1;
- Persistent<Object> g2s2;
- Persistent<Object> g3s1;
- Persistent<Object> g3s2;
-
- {
- HandleScope scope;
- g1s1 = Persistent<Object>::New(Object::New());
- g1s2 = Persistent<Object>::New(Object::New());
- g1s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g1s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
-
- g2s1 = Persistent<Object>::New(Object::New());
- g2s2 = Persistent<Object>::New(Object::New());
- g2s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g2s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
-
- g3s1 = Persistent<Object>::New(Object::New());
- g3s2 = Persistent<Object>::New(Object::New());
- g3s1.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- g3s2.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- }
-
- // Make a root.
- Persistent<Object> root = Persistent<Object>::New(g1s1);
- root.MarkPartiallyDependent();
-
- // Connect groups. We're building the following cycle:
- // G1: { g1s1, g2s1 }, g1s1 implicitly references g2s1, ditto for other
- // groups.
- {
- g1s1.MarkPartiallyDependent();
- g1s2.MarkPartiallyDependent();
- g2s1.MarkPartiallyDependent();
- g2s2.MarkPartiallyDependent();
- g3s1.MarkPartiallyDependent();
- g3s2.MarkPartiallyDependent();
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), g2s1);
- V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), g3s1);
- V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), g1s1);
- }
-
- HEAP->CollectGarbage(i::NEW_SPACE);
-
- // All objects should be alive.
- CHECK_EQ(0, counter.NumberOfWeakCalls());
-
- // Weaken the root.
- root.MakeWeak(reinterpret_cast<void*>(&counter), &WeakPointerCallback);
- root.MarkPartiallyDependent();
-
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
- // Groups are deleted, rebuild groups.
- {
- g1s1.MarkPartiallyDependent(isolate);
- g1s2.MarkPartiallyDependent(isolate);
- g2s1.MarkPartiallyDependent(isolate);
- g2s2.MarkPartiallyDependent(isolate);
- g3s1.MarkPartiallyDependent(isolate);
- g3s2.MarkPartiallyDependent(isolate);
- Persistent<Value> g1_objects[] = { g1s1, g1s2 };
- Persistent<Value> g2_objects[] = { g2s1, g2s2 };
- Persistent<Value> g3_objects[] = { g3s1, g3s2 };
- V8::AddObjectGroup(g1_objects, 2);
- g1s1->Set(v8_str("x"), g2s1);
- V8::AddObjectGroup(g2_objects, 2);
- g2s1->Set(v8_str("x"), g3s1);
- V8::AddObjectGroup(g3_objects, 2);
- g3s1->Set(v8_str("x"), g1s1);
- }
-
- HEAP->CollectGarbage(i::NEW_SPACE);
-
// All objects should be gone. 7 global handles in total.
CHECK_EQ(7, counter.NumberOfWeakCalls());
}
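
The expected count in the assertion above follows directly from the handles the test still creates once the fourth group is removed:

// 3 object groups x 2 persistent objects each, plus the weakened root.
static const int kExpectedWeakCalls = 3 * 2 + 1;  // == 7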
@@ -2692,18 +2452,6 @@ THREADED_TEST(ScriptException) {
}
-TEST(TryCatchCustomException) {
- v8::HandleScope scope;
- LocalContext env;
- v8::TryCatch try_catch;
- CompileRun("function CustomError() { this.a = 'b'; }"
- "(function f() { throw new CustomError(); })();");
- CHECK(try_catch.HasCaught());
- CHECK(try_catch.Exception()->ToObject()->
- Get(v8_str("a"))->Equals(v8_str("b")));
-}
-
-
bool message_received;
@@ -3756,30 +3504,6 @@ THREADED_TEST(TryCatchAndFinally) {
}
-static void TryCatchNestedHelper(int depth) {
- if (depth > 0) {
- v8::TryCatch try_catch;
- try_catch.SetVerbose(true);
- TryCatchNestedHelper(depth - 1);
- CHECK(try_catch.HasCaught());
- try_catch.ReThrow();
- } else {
- v8::ThrowException(v8_str("back"));
- }
-}
-
-
-TEST(TryCatchNested) {
- v8::V8::Initialize();
- v8::HandleScope scope;
- LocalContext context;
- v8::TryCatch try_catch;
- TryCatchNestedHelper(5);
- CHECK(try_catch.HasCaught());
- CHECK_EQ(0, strcmp(*v8::String::Utf8Value(try_catch.Exception()), "back"));
-}
-
-
THREADED_TEST(Equality) {
v8::HandleScope scope;
LocalContext context;
@@ -5584,28 +5308,20 @@ THREADED_TEST(IndependentWeakHandle) {
v8::Persistent<Context> context = Context::New();
Context::Scope context_scope(context);
- v8::Persistent<v8::Object> object_a, object_b;
+ v8::Persistent<v8::Object> object_a;
{
v8::HandleScope handle_scope;
object_a = v8::Persistent<v8::Object>::New(v8::Object::New());
- object_b = v8::Persistent<v8::Object>::New(v8::Object::New());
}
- v8::Isolate* isolate = v8::Isolate::GetCurrent();
bool object_a_disposed = false;
- bool object_b_disposed = false;
object_a.MakeWeak(&object_a_disposed, &DisposeAndSetFlag);
- object_b.MakeWeak(&object_b_disposed, &DisposeAndSetFlag);
CHECK(!object_a.IsIndependent());
- CHECK(!object_b.IsIndependent(isolate));
object_a.MarkIndependent();
- object_b.MarkIndependent(isolate);
CHECK(object_a.IsIndependent());
- CHECK(object_b.IsIndependent(isolate));
HEAP->PerformScavenge();
CHECK(object_a_disposed);
- CHECK(object_b_disposed);
}
@@ -8129,8 +7845,12 @@ THREADED_TEST(ShadowObject) {
Local<ObjectTemplate> proto = t->PrototypeTemplate();
Local<ObjectTemplate> instance = t->InstanceTemplate();
+ // Only allow calls of f on instances of t.
+ Local<v8::Signature> signature = v8::Signature::New(t);
proto->Set(v8_str("f"),
- v8::FunctionTemplate::New(ShadowFunctionCallback, Local<Value>()));
+ v8::FunctionTemplate::New(ShadowFunctionCallback,
+ Local<Value>(),
+ signature));
proto->Set(v8_str("x"), v8_num(12));
instance->SetAccessor(v8_str("y"), ShadowYGetter, ShadowYSetter);
@@ -9995,7 +9715,6 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
@@ -10026,7 +9745,6 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
@@ -10063,7 +9781,6 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
@@ -10100,7 +9817,6 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
@@ -10140,7 +9856,6 @@ THREADED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
templ->SetNamedPropertyHandler(InterceptorCallICFastApi,
NULL, NULL, NULL, NULL,
@@ -10203,7 +9918,6 @@ THREADED_TEST(CallICFastApi_SimpleSignature) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10231,7 +9945,6 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss1) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10264,7 +9977,6 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
v8::Signature::New(fun_templ));
v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
CHECK(!templ.IsEmpty());
LocalContext context;
@@ -10291,42 +10003,6 @@ THREADED_TEST(CallICFastApi_SimpleSignature_Miss2) {
CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
}
-THREADED_TEST(CallICFastApi_SimpleSignature_TypeError) {
- v8::HandleScope scope;
- v8::Handle<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New();
- v8::Handle<v8::FunctionTemplate> method_templ =
- v8::FunctionTemplate::New(FastApiCallback_SimpleSignature,
- v8_str("method_data"),
- v8::Signature::New(fun_templ));
- v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
- proto_templ->Set(v8_str("method"), method_templ);
- fun_templ->SetHiddenPrototype(true);
- v8::Handle<v8::ObjectTemplate> templ(fun_templ->InstanceTemplate());
- CHECK(!templ.IsEmpty());
- LocalContext context;
- v8::Handle<v8::Function> fun = fun_templ->GetFunction();
- GenerateSomeGarbage();
- context->Global()->Set(v8_str("o"), fun->NewInstance());
- v8::TryCatch try_catch;
- CompileRun(
- "o.foo = 17;"
- "var receiver = {};"
- "receiver.__proto__ = o;"
- "var result = 0;"
- "var saved_result = 0;"
- "for (var i = 0; i < 100; i++) {"
- " result = receiver.method(41);"
- " if (i == 50) {"
- " saved_result = result;"
- " receiver = Object.create(receiver);"
- " }"
- "}");
- CHECK(try_catch.HasCaught());
- CHECK_EQ(v8_str("TypeError: Illegal invocation"),
- try_catch.Exception()->ToString());
- CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
-}
-
v8::Handle<Value> keyed_call_ic_function;
@@ -11478,7 +11154,6 @@ static void RunLoopInNewEnv() {
TEST(SetFunctionEntryHook) {
i::FLAG_allow_natives_syntax = true;
- i::FLAG_use_inlining = false;
// Test setting and resetting the entry hook.
// Nulling it should always succeed.
@@ -11611,6 +11286,10 @@ static void event_handler(const v8::JitCodeEvent* event) {
}
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(i::PagedSpace* space);
+
+
static bool MatchPointers(void* key1, void* key2) {
return key1 == key2;
}
@@ -12732,7 +12411,7 @@ static void MorphAString(i::String* string,
AsciiVectorResource* ascii_resource,
UC16VectorResource* uc16_resource) {
CHECK(i::StringShape(string).IsExternal());
- if (string->IsOneByteRepresentation()) {
+ if (string->IsAsciiRepresentation()) {
// Check old map is not symbol or long.
CHECK(string->map() == HEAP->external_ascii_string_map());
// Morph external string to be TwoByte string.
@@ -15137,12 +14816,11 @@ THREADED_TEST(GetHeapStatistics) {
class VisitorImpl : public v8::ExternalResourceVisitor {
public:
- explicit VisitorImpl(TestResource** resource) {
- for (int i = 0; i < 4; i++) {
- resource_[i] = resource[i];
- found_resource_[i] = false;
- }
- }
+ VisitorImpl(TestResource* r1, TestResource* r2)
+ : resource1_(r1),
+ resource2_(r2),
+ found_resource1_(false),
+ found_resource2_(false) {}
virtual ~VisitorImpl() {}
virtual void VisitExternalString(v8::Handle<v8::String> string) {
if (!string->IsExternal()) {
@@ -15152,22 +14830,25 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
v8::String::ExternalStringResource* resource =
string->GetExternalStringResource();
CHECK(resource);
- for (int i = 0; i < 4; i++) {
- if (resource_[i] == resource) {
- CHECK(!found_resource_[i]);
- found_resource_[i] = true;
- }
+ if (resource1_ == resource) {
+ CHECK(!found_resource1_);
+ found_resource1_ = true;
+ }
+ if (resource2_ == resource) {
+ CHECK(!found_resource2_);
+ found_resource2_ = true;
}
}
void CheckVisitedResources() {
- for (int i = 0; i < 4; i++) {
- CHECK(found_resource_[i]);
- }
+ CHECK(found_resource1_);
+ CHECK(found_resource2_);
}
private:
- v8::String::ExternalStringResource* resource_[4];
- bool found_resource_[4];
+ v8::String::ExternalStringResource* resource1_;
+ v8::String::ExternalStringResource* resource2_;
+ bool found_resource1_;
+ bool found_resource2_;
};
TEST(VisitExternalStrings) {
@@ -15175,33 +14856,16 @@ TEST(VisitExternalStrings) {
LocalContext env;
const char* string = "Some string";
uint16_t* two_byte_string = AsciiToTwoByteString(string);
- TestResource* resource[4];
- resource[0] = new TestResource(two_byte_string);
- v8::Local<v8::String> string0 = v8::String::NewExternal(resource[0]);
- resource[1] = new TestResource(two_byte_string);
- v8::Local<v8::String> string1 = v8::String::NewExternal(resource[1]);
-
- // Externalized symbol.
- resource[2] = new TestResource(two_byte_string);
- v8::Local<v8::String> string2 = v8::String::NewSymbol(string);
- CHECK(string2->MakeExternal(resource[2]));
-
- // Symbolized External.
- resource[3] = new TestResource(AsciiToTwoByteString("Some other string"));
- v8::Local<v8::String> string3 = v8::String::NewExternal(resource[3]);
- HEAP->CollectAllAvailableGarbage(); // Tenure string.
- // Turn into a symbol.
- i::Handle<i::String> string3_i = v8::Utils::OpenHandle(*string3);
- CHECK(!HEAP->LookupSymbol(*string3_i)->IsFailure());
- CHECK(string3_i->IsSymbol());
-
- // We need to add usages for string* to avoid warnings in GCC 4.7
- CHECK(string0->IsExternal());
+ TestResource* resource1 = new TestResource(two_byte_string);
+ v8::Local<v8::String> string1 = v8::String::NewExternal(resource1);
+ TestResource* resource2 = new TestResource(two_byte_string);
+ v8::Local<v8::String> string2 = v8::String::NewExternal(resource2);
+
+ // We need to add usages for string1 and string2 to avoid warnings in GCC 4.7
CHECK(string1->IsExternal());
CHECK(string2->IsExternal());
- CHECK(string3->IsExternal());
- VisitorImpl visitor(resource);
+ VisitorImpl visitor(resource1, resource2);
v8::V8::VisitExternalResources(&visitor);
visitor.CheckVisitedResources();
}
@@ -15891,13 +15555,13 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
CHECK(result->IsString());
i::Handle<i::String> string = v8::Utils::OpenHandle(String::Cast(*result));
int length = string->length();
- CHECK(string->IsOneByteRepresentation());
+ CHECK(string->IsAsciiRepresentation());
FlattenString(string);
i::Handle<i::String> flat_string = FlattenGetString(string);
- CHECK(string->IsOneByteRepresentation());
- CHECK(flat_string->IsOneByteRepresentation());
+ CHECK(string->IsAsciiRepresentation());
+ CHECK(flat_string->IsAsciiRepresentation());
// Create external resource.
uint16_t* uc16_buffer = new uint16_t[length + 1];
@@ -15916,7 +15580,7 @@ THREADED_TEST(TwoByteStringInAsciiCons) {
// ASCII characters). This is a valid sequence of steps, and it can happen
// in real pages.
- CHECK(string->IsOneByteRepresentation());
+ CHECK(string->IsAsciiRepresentation());
i::ConsString* cons = i::ConsString::cast(*string);
CHECK_EQ(0, cons->second()->length());
CHECK(cons->first()->IsTwoByteRepresentation());
@@ -18093,6 +17757,7 @@ class ThreadInterruptTest {
private:
ThreadInterruptTest* test_;
+ struct sigaction sa_;
};
i::Semaphore* sem_;
diff --git a/deps/v8/test/cctest/test-assembler-arm.cc b/deps/v8/test/cctest/test-assembler-arm.cc
index adec13b66..cdab1b95c 100644
--- a/deps/v8/test/cctest/test-assembler-arm.cc
+++ b/deps/v8/test/cctest/test-assembler-arm.cc
@@ -259,8 +259,6 @@ TEST(4) {
__ vadd(d5, d6, d7);
__ vstr(d5, r4, OFFSET_OF(T, c));
- __ vmla(d5, d6, d7);
-
__ vmov(r2, r3, d5);
__ vmov(d4, r2, r3);
__ vstr(d4, r4, OFFSET_OF(T, b));
@@ -349,7 +347,7 @@ TEST(4) {
CHECK_EQ(1.0, t.e);
CHECK_EQ(1.000000059604644775390625, t.d);
CHECK_EQ(4.25, t.c);
- CHECK_EQ(8.375, t.b);
+ CHECK_EQ(4.25, t.b);
CHECK_EQ(1.5, t.a);
}
}
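
The expectation for t.b changes because the removed vmla was a fused multiply-accumulate on d5 between the two stores; without it both c and b observe the plain sum. The implied arithmetic, consistent with the CHECK_EQ(1.5, t.a) at the end (d6 == 1.5, d7 == 2.75 assumed from the test inputs):

static const double kSum = 4.25;                      // d5 = d6 + d7
static const double kAccumulated = 8.375;             // old vmla result
static const double kProduct = kAccumulated - kSum;   // d6 * d7 == 4.125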
diff --git a/deps/v8/test/cctest/test-compiler.cc b/deps/v8/test/cctest/test-compiler.cc
index 807adbf7f..7700a980d 100644
--- a/deps/v8/test/cctest/test-compiler.cc
+++ b/deps/v8/test/cctest/test-compiler.cc
@@ -100,11 +100,10 @@ static MaybeObject* GetGlobalProperty(const char* name) {
static void SetGlobalProperty(const char* name, Object* value) {
- Isolate* isolate = Isolate::Current();
Handle<Object> object(value);
Handle<String> symbol = FACTORY->LookupAsciiSymbol(name);
Handle<JSObject> global(Isolate::Current()->context()->global_object());
- SetProperty(isolate, global, symbol, object, NONE, kNonStrictMode);
+ SetProperty(global, symbol, object, NONE, kNonStrictMode);
}
diff --git a/deps/v8/test/cctest/test-debug.cc b/deps/v8/test/cctest/test-debug.cc
index 941fa688d..f2253e3a9 100644
--- a/deps/v8/test/cctest/test-debug.cc
+++ b/deps/v8/test/cctest/test-debug.cc
@@ -27,9 +27,6 @@
#ifdef ENABLE_DEBUGGER_SUPPORT
-// TODO(svenpanne): Do not use Context::GetData and Context::SetData.
-#define V8_DISABLE_DEPRECATIONS 1
-
#include <stdlib.h>
#include "v8.h"
@@ -146,8 +143,7 @@ class DebugLocalContext {
inline v8::Context* operator*() { return *context_; }
inline bool IsReady() { return !context_.IsEmpty(); }
void ExposeDebug() {
- v8::internal::Isolate* isolate = v8::internal::Isolate::Current();
- v8::internal::Debug* debug = isolate->debug();
+ v8::internal::Debug* debug = v8::internal::Isolate::Current()->debug();
// Expose the debug context global object in the global object for testing.
debug->Load();
debug->debug_context()->set_security_token(
@@ -157,7 +153,7 @@ class DebugLocalContext {
v8::Utils::OpenHandle(*context_->Global())));
Handle<v8::internal::String> debug_string =
FACTORY->LookupAsciiSymbol("debug");
- SetProperty(isolate, global, debug_string,
+ SetProperty(global, debug_string,
Handle<Object>(debug->debug_context()->global_proxy()), DONT_ENUM,
::v8::internal::kNonStrictMode);
}
diff --git a/deps/v8/test/cctest/test-decls.cc b/deps/v8/test/cctest/test-decls.cc
index 824c4e764..6fc601213 100644
--- a/deps/v8/test/cctest/test-decls.cc
+++ b/deps/v8/test/cctest/test-decls.cc
@@ -190,8 +190,7 @@ v8::Handle<Integer> DeclarationContext::HandleQuery(Local<String> key,
DeclarationContext* DeclarationContext::GetInstance(const AccessorInfo& info) {
- void* value = External::Cast(*info.Data())->Value();
- return static_cast<DeclarationContext*>(value);
+ return static_cast<DeclarationContext*>(External::Unwrap(info.Data()));
}
@@ -735,7 +734,7 @@ class SimpleContext {
};
-TEST(CrossScriptReferences) {
+TEST(MultiScriptConflicts) {
HandleScope scope;
{ SimpleContext context;
@@ -773,70 +772,135 @@ TEST(CrossScriptReferences) {
context.Check("function x() { return 7 }; x",
EXPECT_EXCEPTION);
}
-}
-
-TEST(CrossScriptReferencesHarmony) {
i::FLAG_use_strict = true;
i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
- HandleScope scope;
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("this.x",
+ EXPECT_RESULT, Number::New(1));
+ }
- const char* decs[] = {
- "var x = 1; x", "x", "this.x",
- "function x() { return 1 }; x()", "x()", "this.x()",
- "let x = 1; x", "x", "this.x",
- "const x = 1; x", "x", "this.x",
- "module x { export let a = 1 }; x.a", "x.a", "this.x.a",
- NULL
- };
+ { SimpleContext context;
+ context.Check("function x() { return 4 }; x()",
+ EXPECT_RESULT, Number::New(4));
+ context.Check("x()",
+ EXPECT_RESULT, Number::New(4));
+ context.Check("this.x()",
+ EXPECT_RESULT, Number::New(4));
+ }
- for (int i = 0; decs[i] != NULL; i += 3) {
- SimpleContext context;
- context.Check(decs[i], EXPECT_RESULT, Number::New(1));
- context.Check(decs[i+1], EXPECT_RESULT, Number::New(1));
+ { SimpleContext context;
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(2));
// TODO(rossberg): The current ES6 draft spec does not reflect lexical
// bindings on the global object. However, this will probably change, in
// which case we reactivate the following test.
- if (i/3 < 2) context.Check(decs[i+2], EXPECT_RESULT, Number::New(1));
+ // context.Check("this.x",
+ // EXPECT_RESULT, Number::New(2));
}
-}
+ { SimpleContext context;
+ context.Check("const x = 3; x",
+ EXPECT_RESULT, Number::New(3));
+ context.Check("x",
+ EXPECT_RESULT, Number::New(3));
+ // TODO(rossberg): The current ES6 draft spec does not reflect lexical
+ // bindings on the global object. However, this will probably change, in
+ // which case we reactivate the following test.
+ // context.Check("this.x",
+ // EXPECT_RESULT, Number::New(3));
+ }
-TEST(CrossScriptConflicts) {
- i::FLAG_use_strict = true;
- i::FLAG_harmony_scoping = true;
- i::FLAG_harmony_modules = true;
+ // TODO(rossberg): All of the below should actually be errors in Harmony.
- HandleScope scope;
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
- const char* firsts[] = {
- "var x = 1; x",
- "function x() { return 1 }; x()",
- "let x = 1; x",
- "const x = 1; x",
- "module x { export let a = 1 }; x.a",
- NULL
- };
- const char* seconds[] = {
- "var x = 2; x",
- "function x() { return 2 }; x()",
- "let x = 2; x",
- "const x = 2; x",
- "module x { export let a = 2 }; x.a",
- NULL
- };
+ { SimpleContext context;
+ context.Check("var x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
- for (int i = 0; firsts[i] != NULL; ++i) {
- for (int j = 0; seconds[j] != NULL; ++j) {
- SimpleContext context;
- context.Check(firsts[i], EXPECT_RESULT, Number::New(1));
- // TODO(rossberg): All tests should actually be errors in Harmony,
- // but we currently do not detect the cases where the first declaration
- // is not lexical.
- context.Check(seconds[j],
- i < 2 ? EXPECT_RESULT : EXPECT_ERROR, Number::New(2));
- }
+ { SimpleContext context;
+ context.Check("function x() { return 1 }; x()",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("function x() { return 1 }; x()",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_RESULT, Number::New(2));
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("let x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("function x() { return 2 }; x()",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("var x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("let x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("const x = 2; x",
+ EXPECT_ERROR);
+ }
+
+ { SimpleContext context;
+ context.Check("const x = 1; x",
+ EXPECT_RESULT, Number::New(1));
+ context.Check("function x() { return 2 }; x()",
+ EXPECT_ERROR);
}
}
diff --git a/deps/v8/test/cctest/test-dictionary.cc b/deps/v8/test/cctest/test-dictionary.cc
index 2acd4e664..00e38333f 100644
--- a/deps/v8/test/cctest/test-dictionary.cc
+++ b/deps/v8/test/cctest/test-dictionary.cc
@@ -114,8 +114,7 @@ TEST(ObjectHashSetCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(HEAP->new_space());
- SimulateFullSpace(HEAP->old_pointer_space());
+ FLAG_gc_interval = 0;
// Calling Contains() should not cause GC ever.
CHECK(!table->Contains(*key));
@@ -144,8 +143,7 @@ TEST(ObjectHashTableCausesGC) {
// Simulate a full heap so that generating an identity hash code
// in subsequent calls will request GC.
- SimulateFullSpace(HEAP->new_space());
- SimulateFullSpace(HEAP->old_pointer_space());
+ FLAG_gc_interval = 0;
// Calling Lookup() should not cause GC ever.
CHECK(table->Lookup(*key)->IsTheHole());
diff --git a/deps/v8/test/cctest/test-disasm-arm.cc b/deps/v8/test/cctest/test-disasm-arm.cc
index 0ac3c5a94..3a2d9e836 100644
--- a/deps/v8/test/cctest/test-disasm-arm.cc
+++ b/deps/v8/test/cctest/test-disasm-arm.cc
@@ -547,11 +547,6 @@ TEST(Vfp) {
"ec860a20 vstmia r6, {s0-s31}");
COMPARE(vldm(ia, r7, s0, s31),
"ec970a20 vldmia r7, {s0-s31}");
-
- COMPARE(vmla(d2, d1, d0),
- "ee012b00 vmla.f64 d2, d1, d0");
- COMPARE(vmla(d6, d4, d5, cc),
- "3e046b05 vmla.f64cc d6, d4, d5");
}
VERIFY_RUN();
@@ -758,3 +753,4 @@ TEST(LoadStore) {
VERIFY_RUN();
}
+
diff --git a/deps/v8/test/cctest/test-heap-profiler.cc b/deps/v8/test/cctest/test-heap-profiler.cc
index 4a65344aa..52359711d 100644
--- a/deps/v8/test/cctest/test-heap-profiler.cc
+++ b/deps/v8/test/cctest/test-heap-profiler.cc
@@ -1015,6 +1015,7 @@ class TestRetainedObjectInfo : public v8::RetainedObjectInfo {
private:
bool disposed_;
+ int category_;
int hash_;
const char* group_label_;
const char* label_;
@@ -1227,33 +1228,6 @@ TEST(DeleteHeapSnapshot) {
}
-class NameResolver : public v8::HeapProfiler::ObjectNameResolver {
- public:
- virtual const char* GetName(v8::Handle<v8::Object> object) {
- return "Global object name";
- }
-};
-
-TEST(GlobalObjectName) {
- v8::HandleScope scope;
- LocalContext env;
-
- CompileRun("document = { URL:\"abcdefgh\" };");
-
- NameResolver name_resolver;
- const v8::HeapSnapshot* snapshot =
- v8::HeapProfiler::TakeSnapshot(v8_str("document"),
- v8::HeapSnapshot::kFull,
- NULL,
- &name_resolver);
- const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
- CHECK_NE(NULL, global);
- CHECK_EQ("Object / Global object name" ,
- const_cast<i::HeapEntry*>(
- reinterpret_cast<const i::HeapEntry*>(global))->name());
-}
-
-
TEST(DocumentURL) {
v8::HandleScope scope;
LocalContext env;
diff --git a/deps/v8/test/cctest/test-heap.cc b/deps/v8/test/cctest/test-heap.cc
index 533a1c3ef..6927c43b9 100644
--- a/deps/v8/test/cctest/test-heap.cc
+++ b/deps/v8/test/cctest/test-heap.cc
@@ -23,21 +23,6 @@ static void InitializeVM() {
}
-// Go through all incremental marking steps in one swoop.
-static void SimulateIncrementalMarking() {
- IncrementalMarking* marking = HEAP->incremental_marking();
- CHECK(marking->IsMarking() || marking->IsStopped());
- if (marking->IsStopped()) {
- marking->Start();
- }
- CHECK(marking->IsMarking());
- while (!marking->IsComplete()) {
- marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
- }
- CHECK(marking->IsComplete());
-}
-
-
static void CheckMap(Map* map, int type, int instance_size) {
CHECK(map->IsHeapObject());
#ifdef DEBUG
@@ -415,10 +400,9 @@ TEST(WeakGlobalHandlesMark) {
h2 = global_handles->Create(*u);
}
- // Make sure the objects are promoted.
HEAP->CollectGarbage(OLD_POINTER_SPACE);
HEAP->CollectGarbage(NEW_SPACE);
- CHECK(!HEAP->InNewSpace(*h1) && !HEAP->InNewSpace(*h2));
+ // Make sure the object is promoted.
global_handles->MakeWeak(h2.location(),
reinterpret_cast<void*>(1234),
@@ -426,8 +410,7 @@ TEST(WeakGlobalHandlesMark) {
CHECK(!GlobalHandles::IsNearDeath(h1.location()));
CHECK(!GlobalHandles::IsNearDeath(h2.location()));
- // Incremental marking potentially marked handles before they turned weak.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ HEAP->CollectGarbage(OLD_POINTER_SPACE);
CHECK((*h1)->IsString());
@@ -959,9 +942,9 @@ TEST(Regression39128) {
TEST(TestCodeFlushing) {
+ i::FLAG_allow_natives_syntax = true;
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
- i::FLAG_allow_natives_syntax = true;
InitializeVM();
v8::HandleScope scope;
const char* source = "function foo() {"
@@ -984,217 +967,26 @@ TEST(TestCodeFlushing) {
Handle<JSFunction> function(JSFunction::cast(func_value));
CHECK(function->shared()->is_compiled());
- // The code will survive at least two GCs.
+ // TODO(1609) Currently incremental marker does not support code flushing.
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK(function->shared()->is_compiled());
-
- // Simulate several GCs that use full marking.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- }
- // foo should no longer be in the compilation cache
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
- // Call foo to get it recompiled.
- CompileRun("foo()");
CHECK(function->shared()->is_compiled());
- CHECK(function->is_compiled());
-}
-
-TEST(TestCodeFlushingIncremental) {
- // If we do not flush code this test is invalid.
- if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
- i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope;
- const char* source = "function foo() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo()";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
-
- // This compile will add the code to the compilation cache.
- { v8::HandleScope scope;
- CompileRun(source);
- }
-
- // Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
- GetProperty(*foo_name)->ToObjectChecked();
- CHECK(func_value->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(func_value));
- CHECK(function->shared()->is_compiled());
-
- // The code will survive at least two GCs.
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- CHECK(function->shared()->is_compiled());
-
- // Simulate several GCs that use incremental marking.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- SimulateIncrementalMarking();
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- }
- CHECK(!function->shared()->is_compiled() || function->IsOptimized());
- CHECK(!function->is_compiled() || function->IsOptimized());
-
- // This compile will compile the function again.
- { v8::HandleScope scope;
- CompileRun("foo();");
- }
-
- // Simulate several GCs that use incremental marking but make sure
- // the loop breaks once the function is enqueued as a candidate.
- for (int i = 0; i < kAgingThreshold; i++) {
- SimulateIncrementalMarking();
- if (!function->next_function_link()->IsUndefined()) break;
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- }
-
- // Force optimization while incremental marking is active and while
- // the function is enqueued as a candidate.
- { v8::HandleScope scope;
- CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
- }
-
- // Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(function->shared()->is_compiled() || !function->IsOptimized());
- CHECK(function->is_compiled() || !function->IsOptimized());
-}
-
-
-TEST(TestCodeFlushingIncrementalScavenge) {
- // If we do not flush code this test is invalid.
- if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
- i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope;
- const char* source = "var foo = function() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo();"
- "var bar = function() {"
- " var x = 23;"
- "};"
- "bar();";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
- Handle<String> bar_name = FACTORY->LookupAsciiSymbol("bar");
-
- // Perform one initial GC to enable code flushing.
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- // This compile will add the code to the compilation cache.
- { v8::HandleScope scope;
- CompileRun(source);
- }
-
- // Check functions are compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
- GetProperty(*foo_name)->ToObjectChecked();
- CHECK(func_value->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(func_value));
- CHECK(function->shared()->is_compiled());
- Object* func_value2 = Isolate::Current()->context()->global_object()->
- GetProperty(*bar_name)->ToObjectChecked();
- CHECK(func_value2->IsJSFunction());
- Handle<JSFunction> function2(JSFunction::cast(func_value2));
- CHECK(function2->shared()->is_compiled());
-
- // Clear references to functions so that one of them can die.
- { v8::HandleScope scope;
- CompileRun("foo = 0; bar = 0;");
- }
-
- // Bump the code age so that flushing is triggered while the function
- // object is still located in new-space.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- function->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- function2->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- // Simulate incremental marking so that the functions are enqueued as
- // code flushing candidates. Then kill one of the functions. Finally
- // perform a scavenge while incremental marking is still running.
- SimulateIncrementalMarking();
- *function2.location() = NULL;
- HEAP->CollectGarbage(NEW_SPACE, "test scavenge while marking");
-
- // Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
+ // foo should no longer be in the compilation cache.
CHECK(!function->shared()->is_compiled() || function->IsOptimized());
CHECK(!function->is_compiled() || function->IsOptimized());
-}
-
-
-TEST(TestCodeFlushingIncrementalAbort) {
- // If we do not flush code this test is invalid.
- if (!FLAG_flush_code || !FLAG_flush_code_incrementally) return;
- i::FLAG_allow_natives_syntax = true;
- InitializeVM();
- v8::HandleScope scope;
- const char* source = "function foo() {"
- " var x = 42;"
- " var y = 42;"
- " var z = x + y;"
- "};"
- "foo()";
- Handle<String> foo_name = FACTORY->LookupAsciiSymbol("foo");
-
- // This compile will add the code to the compilation cache.
- { v8::HandleScope scope;
- CompileRun(source);
- }
-
- // Check function is compiled.
- Object* func_value = Isolate::Current()->context()->global_object()->
- GetProperty(*foo_name)->ToObjectChecked();
- CHECK(func_value->IsJSFunction());
- Handle<JSFunction> function(JSFunction::cast(func_value));
- CHECK(function->shared()->is_compiled());
-
- // The code will survive at least two GCs.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+ // Call foo to get it recompiled.
+ CompileRun("foo()");
CHECK(function->shared()->is_compiled());
-
- // Bump the code age so that flushing is triggered.
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- function->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- // Simulate incremental marking so that the function is enqueued as
- // code flushing candidate.
- SimulateIncrementalMarking();
-
- // Enable the debugger and add a breakpoint while incremental marking
- // is running so that incremental marking aborts and code flushing is
- // disabled.
- int position = 0;
- Handle<Object> breakpoint_object(Smi::FromInt(0));
- ISOLATE->debug()->SetBreakPoint(function, breakpoint_object, &position);
- ISOLATE->debug()->ClearAllBreakPoints();
-
- // Force optimization now that code flushing is disabled.
- { v8::HandleScope scope;
- CompileRun("%OptimizeFunctionOnNextCall(foo); foo();");
- }
-
- // Simulate one final GC to make sure the candidate queue is sane.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(function->shared()->is_compiled() || !function->IsOptimized());
- CHECK(function->is_compiled() || !function->IsOptimized());
+ CHECK(function->is_compiled());
}
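// Note on the GC cadence above: without incremental code-flushing support
// (see the TODO(1609) comment), the downgraded test ages foo's code by
// running full mark-sweep collections back to back until the flushing
// threshold is crossed, then recompiles foo with a plain call.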
@@ -1227,10 +1019,6 @@ static int CountOptimizedUserFunctions(v8::Handle<v8::Context> context) {
TEST(TestInternalWeakLists) {
v8::V8::Initialize();
- // Some flags turn Scavenge collections into Mark-sweep collections
- // and hence are incompatible with this test case.
- if (FLAG_gc_global || FLAG_stress_compaction) return;
-
static const int kNumTestContexts = 10;
v8::HandleScope scope;
@@ -1743,7 +1531,6 @@ TEST(InstanceOfStubWriteBarrier) {
InitializeVM();
if (!i::V8::UseCrankshaft()) return;
- if (i::FLAG_force_marking_deque_overflows) return;
v8::HandleScope outer_scope;
{
@@ -1829,11 +1616,10 @@ TEST(PrototypeTransitionClearing) {
// Make sure next prototype is placed on an old-space evacuation candidate.
Handle<JSObject> prototype;
PagedSpace* space = HEAP->old_pointer_space();
- {
- AlwaysAllocateScope always_allocate;
- SimulateFullSpace(space);
+ do {
prototype = FACTORY->NewJSArray(32 * KB, FAST_HOLEY_ELEMENTS, TENURED);
- }
+ } while (space->FirstPage() == space->LastPage() ||
+ !space->LastPage()->Contains(prototype->address()));
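// The replacement loop above retries the tenured allocation until
// old-pointer space spans more than one page and the prototype sits on the
// last page, which the code below then relies on as an evacuation candidate.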
// Add a prototype on an evacuation candidate and verify that transition
// clearing correctly records slots in prototype transition array.
@@ -1952,10 +1738,9 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
i::FLAG_allow_natives_syntax = true;
InitializeVM();
if (!i::V8::UseCrankshaft() || i::FLAG_always_opt) return;
- if (i::FLAG_gc_global || i::FLAG_stress_compaction) return;
v8::HandleScope scope;
- SimulateFullSpace(HEAP->new_space());
+ FillUpNewSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
v8::Local<v8::Value> res = CompileRun(
"function c(x) {"
@@ -1982,6 +1767,19 @@ static int CountMapTransitions(Map* map) {
}
+// Go through all incremental marking steps in one swoop.
+static void SimulateIncrementalMarking() {
+ IncrementalMarking* marking = HEAP->incremental_marking();
+ CHECK(marking->IsStopped());
+ marking->Start();
+ CHECK(marking->IsMarking());
+ while (!marking->IsComplete()) {
+ marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+ }
+ CHECK(marking->IsComplete());
+}
+
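// A minimal usage sketch (illustrative only, using the HEAP macro as the
// rest of this file does): callers pair the helper with a full GC so that
// anything the simulated marking cycle queued is actually processed:
//
//   SimulateIncrementalMarking();
//   HEAP->CollectAllGarbage(Heap::kNoGCFlags);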
+
// Test that map transitions are cleared and maps are collected with
// incremental marking as well.
TEST(Regress1465) {
@@ -2106,6 +1904,10 @@ TEST(Regress2143b) {
}
+// Implemented in the test-alloc.cc test suite.
+void SimulateFullSpace(PagedSpace* space);
+
+
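// Hedged sketch (assuming a paged space, mirroring the calls visible
// elsewhere in this diff): exhaust a space ahead of a test allocation with
//
//   SimulateFullSpace(HEAP->old_pointer_space());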
TEST(ReleaseOverReservedPages) {
i::FLAG_trace_gc = true;
// The optimizer can allocate stuff, messing up the test.
@@ -2128,7 +1930,7 @@ TEST(ReleaseOverReservedPages) {
// Triggering one GC will cause a lot of garbage to be discovered but
// evenly spread across all allocated pages.
HEAP->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
- CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
+ CHECK_EQ(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
// Triggering subsequent GCs should cause at least half of the pages
// to be released to the OS after at most two cycles.
@@ -2159,22 +1961,27 @@ TEST(Regress2237) {
v8::HandleScope inner_scope;
const char* c = "This text is long enough to trigger sliced strings.";
Handle<String> s = FACTORY->NewStringFromAscii(CStrVector(c));
- CHECK(s->IsSeqOneByteString());
+ CHECK(s->IsSeqAsciiString());
CHECK(HEAP->InNewSpace(*s));
// Generate a sliced string that is based on the above parent and
// lives in old-space.
- SimulateFullSpace(HEAP->new_space());
+ FillUpNewSpace(HEAP->new_space());
AlwaysAllocateScope always_allocate;
- Handle<String> t = FACTORY->NewProperSubString(s, 5, 35);
+ Handle<String> t;
+ // TODO(mstarzinger): Unfortunately FillUpNewSpace() still leaves
+ // some slack, so we need to allocate a few sliced strings.
+ for (int i = 0; i < 16; i++) {
+ t = FACTORY->NewProperSubString(s, 5, 35);
+ }
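// Only the final substring handle is kept; the loop exists so that at least
// that last allocation overflows the nearly-full new space and lands in old
// space, as the CHECKs below require.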
CHECK(t->IsSlicedString());
CHECK(!HEAP->InNewSpace(*t));
*slice.location() = *t.location();
}
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
+ CHECK(SlicedString::cast(*slice)->parent()->IsSeqAsciiString());
}
@@ -2414,13 +2221,19 @@ class SourceResource: public v8::String::ExternalAsciiStringResource {
};
-void ReleaseStackTraceDataTest(const char* source) {
+TEST(ReleaseStackTraceData) {
// Test that the data retained by the Error.stack accessor is released
// after the first time the accessor is fired. We use external string
// to check whether the data is being released since the external string
// resource's callback is fired when the external string is GC'ed.
InitializeVM();
v8::HandleScope scope;
+ static const char* source = "var error = 1; "
+ "try { "
+ " throw new Error(); "
+ "} catch (e) { "
+ " error = e; "
+ "} ";
SourceResource* resource = new SourceResource(i::StrDup(source));
{
v8::HandleScope scope;
@@ -2432,29 +2245,12 @@ void ReleaseStackTraceDataTest(const char* source) {
// External source is being retained by the stack trace.
CHECK(!resource->IsDisposed());
- CompileRun("error.stack;");
+ CompileRun("error.stack; error.stack;");
HEAP->CollectAllAvailableGarbage();
// External source has been released.
CHECK(resource->IsDisposed());
- delete resource;
-}
-
-TEST(ReleaseStackTraceData) {
- static const char* source1 = "var error = null; "
- /* Normal Error */ "try { "
- " throw new Error(); "
- "} catch (e) { "
- " error = e; "
- "} ";
- static const char* source2 = "var error = null; "
- /* Stack overflow */ "try { "
- " (function f() { f(); })(); "
- "} catch (e) { "
- " error = e; "
- "} ";
- ReleaseStackTraceDataTest(source1);
- ReleaseStackTraceDataTest(source2);
+ delete resource;
}
@@ -2515,188 +2311,3 @@ TEST(Regression144230) {
USE(global->SetProperty(*name, *call_function, NONE, kNonStrictMode));
CompileRun("call();");
}
-
-
-TEST(Regress159140) {
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_flush_code_incrementally = true;
- InitializeVM();
- v8::HandleScope scope;
-
- // Perform one initial GC to enable code flushing.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
- // Prepare several closures that are all eligible for code flushing
- // because all reachable ones are not optimized. Make sure that the
- // optimized code object is directly reachable through a handle so
- // that it is marked black during incremental marking.
- Handle<Code> code;
- {
- HandleScope inner_scope;
- CompileRun("function h(x) {}"
- "function mkClosure() {"
- " return function(x) { return x + 1; };"
- "}"
- "var f = mkClosure();"
- "var g = mkClosure();"
- "f(1); f(2);"
- "g(1); g(2);"
- "h(1); h(2);"
- "%OptimizeFunctionOnNextCall(f); f(3);"
- "%OptimizeFunctionOnNextCall(h); h(3);");
-
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
- CHECK(f->is_compiled());
- CompileRun("f = null;");
-
- Handle<JSFunction> g =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("g"))));
- CHECK(g->is_compiled());
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- g->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- code = inner_scope.CloseAndEscape(Handle<Code>(f->code()));
- }
-
- // Simulate incremental marking so that the functions are enqueued as
- // code flushing candidates. Then optimize one function. Finally
- // finish the GC to complete code flushing.
- SimulateIncrementalMarking();
- CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
-
- // Unoptimized code is missing and the deoptimizer will go ballistic.
- CompileRun("g('bozo');");
-}
-
-
-TEST(Regress169209) {
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_flush_code_incrementally = true;
- InitializeVM();
- v8::HandleScope scope;
-
- // Perform one initial GC to enable code flushing.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
- // Prepare a shared function info eligible for code flushing for which
- // the unoptimized code will be replaced during optimization.
- Handle<SharedFunctionInfo> shared1;
- {
- HandleScope inner_scope;
- CompileRun("function f() { return 'foobar'; }"
- "function g(x) { if (x) f(); }"
- "f();"
- "g(false);"
- "g(false);");
-
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
- CHECK(f->is_compiled());
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- shared1 = inner_scope.CloseAndEscape(handle(f->shared(), ISOLATE));
- }
-
- // Prepare a shared function info eligible for code flushing that will
- // represent the dangling tail of the candidate list.
- Handle<SharedFunctionInfo> shared2;
- {
- HandleScope inner_scope;
- CompileRun("function flushMe() { return 0; }"
- "flushMe(1);");
-
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("flushMe"))));
- CHECK(f->is_compiled());
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- shared2 = inner_scope.CloseAndEscape(handle(f->shared(), ISOLATE));
- }
-
- // Simulate incremental marking and collect code flushing candidates.
- SimulateIncrementalMarking();
- CHECK(shared1->code()->gc_metadata() != NULL);
-
- // Optimize function and make sure the unoptimized code is replaced.
-#ifdef DEBUG
- FLAG_stop_at = "f";
-#endif
- CompileRun("%OptimizeFunctionOnNextCall(g);"
- "g(false);");
-
- // Finish garbage collection cycle.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- CHECK(shared1->code()->gc_metadata() == NULL);
-}
-
-
-TEST(Regress168801) {
- i::FLAG_always_compact = true;
- i::FLAG_cache_optimized_code = false;
- i::FLAG_allow_natives_syntax = true;
- i::FLAG_flush_code_incrementally = true;
- InitializeVM();
- v8::HandleScope scope;
-
- // Perform one initial GC to enable code flushing.
- HEAP->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
- // Ensure the code ends up on an evacuation candidate.
- SimulateFullSpace(HEAP->code_space());
-
- // Prepare an unoptimized function that is eligible for code flushing.
- Handle<JSFunction> function;
- {
- HandleScope inner_scope;
- CompileRun("function mkClosure() {"
- " return function(x) { return x + 1; };"
- "}"
- "var f = mkClosure();"
- "f(1); f(2);");
-
- Handle<JSFunction> f =
- v8::Utils::OpenHandle(
- *v8::Handle<v8::Function>::Cast(
- v8::Context::GetCurrent()->Global()->Get(v8_str("f"))));
- CHECK(f->is_compiled());
- const int kAgingThreshold = 6;
- for (int i = 0; i < kAgingThreshold; i++) {
- f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
- }
-
- function = inner_scope.CloseAndEscape(handle(*f, ISOLATE));
- }
-
- // Simulate incremental marking so that unoptimized function is enqueued as a
- // candidate for code flushing. The shared function info however will not be
- // explicitly enqueued.
- SimulateIncrementalMarking();
-
- // Now optimize the function so that it is taken off the candidate list.
- {
- HandleScope inner_scope;
- CompileRun("%OptimizeFunctionOnNextCall(f); f(3);");
- }
-
- // This cycle will bust the heap and subsequent cycles will go ballistic.
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
- HEAP->CollectAllGarbage(Heap::kNoGCFlags);
-}
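// For reference, the flushing tests removed above all age code through the
// same idiom before simulating a GC (shown here once as a sketch, assuming a
// Handle<JSFunction> f):
//
//   const int kAgingThreshold = 6;
//   for (int i = 0; i < kAgingThreshold; i++) {
//     f->shared()->code()->MakeOlder(static_cast<MarkingParity>(i % 2));
//   }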
diff --git a/deps/v8/test/cctest/test-lockers.cc b/deps/v8/test/cctest/test-lockers.cc
index 57f717849..5035f8764 100644
--- a/deps/v8/test/cctest/test-lockers.cc
+++ b/deps/v8/test/cctest/test-lockers.cc
@@ -59,9 +59,9 @@ using ::v8::V8;
class KangarooThread : public v8::internal::Thread {
public:
KangarooThread(v8::Isolate* isolate,
- v8::Handle<v8::Context> context)
+ v8::Handle<v8::Context> context, int value)
: Thread("KangarooThread"),
- isolate_(isolate), context_(context) {
+ isolate_(isolate), context_(context), value_(value) {
}
void Run() {
@@ -90,6 +90,7 @@ class KangarooThread : public v8::internal::Thread {
private:
v8::Isolate* isolate_;
Persistent<v8::Context> context_;
+ int value_;
};
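// The restored value_ member is initialized in the constructor above; any
// read of it inside Run() lies outside this hunk's context.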
// Migrates an isolate from one thread to another
@@ -105,7 +106,7 @@ TEST(KangarooIsolates) {
CHECK_EQ(isolate, v8::internal::Isolate::Current());
CompileRun("function getValue() { return 30; }");
}
- KangarooThread thread1(isolate, context);
+ KangarooThread thread1(isolate, context, 1);
thread1.Start();
thread1.Join();
}
diff --git a/deps/v8/test/cctest/test-log.cc b/deps/v8/test/cctest/test-log.cc
index 892a54222..6f2324dbb 100644
--- a/deps/v8/test/cctest/test-log.cc
+++ b/deps/v8/test/cctest/test-log.cc
@@ -392,7 +392,7 @@ TEST(LogCallbacks) {
i::EmbeddedVector<char, 100> ref_data;
i::OS::SNPrintF(ref_data,
- "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"method1\"\0",
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"method1\"\0",
ObjMethod1);
CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
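// The expected record reverts to the shorter 3.14.x format: the downgraded
// logger does not emit the extra "-3," field after "code-creation,Callback,"
// that the newer expectation above included.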
@@ -435,21 +435,21 @@ TEST(LogAccessorCallbacks) {
EmbeddedVector<char, 100> prop1_getter_record;
i::OS::SNPrintF(prop1_getter_record,
- "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"get prop1\"",
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop1\"",
Prop1Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop1_getter_record.start(), log.length()));
EmbeddedVector<char, 100> prop1_setter_record;
i::OS::SNPrintF(prop1_setter_record,
- "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"set prop1\"",
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"set prop1\"",
Prop1Setter);
CHECK_NE(NULL,
StrNStr(log.start(), prop1_setter_record.start(), log.length()));
EmbeddedVector<char, 100> prop2_getter_record;
i::OS::SNPrintF(prop2_getter_record,
- "code-creation,Callback,-3,0x%" V8PRIxPTR ",1,\"get prop2\"",
+ "code-creation,Callback,0x%" V8PRIxPTR ",1,\"get prop2\"",
Prop2Getter);
CHECK_NE(NULL,
StrNStr(log.start(), prop2_getter_record.start(), log.length()));
diff --git a/deps/v8/test/cctest/test-mark-compact.cc b/deps/v8/test/cctest/test-mark-compact.cc
index 69abd8d68..c0ab763a2 100644
--- a/deps/v8/test/cctest/test-mark-compact.cc
+++ b/deps/v8/test/cctest/test-mark-compact.cc
@@ -311,7 +311,6 @@ static void WeakPointerCallback(v8::Persistent<v8::Value> handle, void* id) {
}
TEST(ObjectGroups) {
- FLAG_incremental_marking = false;
InitializeVM();
GlobalHandles* global_handles = Isolate::Current()->global_handles();
diff --git a/deps/v8/test/cctest/test-object-observe.cc b/deps/v8/test/cctest/test-object-observe.cc
deleted file mode 100644
index 25e5557a8..000000000
--- a/deps/v8/test/cctest/test-object-observe.cc
+++ /dev/null
@@ -1,280 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "v8.h"
-
-#include "cctest.h"
-
-using namespace v8;
-
-namespace {
-// Need to create a new isolate when FLAG_harmony_observation is on.
-class HarmonyIsolate {
- public:
- HarmonyIsolate() {
- i::FLAG_harmony_observation = true;
- isolate_ = Isolate::New();
- isolate_->Enter();
- }
-
- ~HarmonyIsolate() {
- isolate_->Exit();
- isolate_->Dispose();
- }
-
- private:
- Isolate* isolate_;
-};
-}
-
-TEST(PerIsolateState) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context1;
- CompileRun(
- "var count = 0;"
- "var calls = 0;"
- "var observer = function(records) { count = records.length; calls++ };"
- "var obj = {};"
- "Object.observe(obj, observer);");
- Handle<Value> observer = CompileRun("observer");
- Handle<Value> obj = CompileRun("obj");
- Handle<Value> notify_fun1 = CompileRun(
- "(function() { obj.foo = 'bar'; })");
- Handle<Value> notify_fun2;
- {
- LocalContext context2;
- context2->Global()->Set(String::New("obj"), obj);
- notify_fun2 = CompileRun(
- "(function() { obj.foo = 'baz'; })");
- }
- Handle<Value> notify_fun3;
- {
- LocalContext context3;
- context3->Global()->Set(String::New("obj"), obj);
- notify_fun3 = CompileRun(
- "(function() { obj.foo = 'bat'; })");
- }
- {
- LocalContext context4;
- context4->Global()->Set(String::New("observer"), observer);
- context4->Global()->Set(String::New("fun1"), notify_fun1);
- context4->Global()->Set(String::New("fun2"), notify_fun2);
- context4->Global()->Set(String::New("fun3"), notify_fun3);
- CompileRun("fun1(); fun2(); fun3(); Object.deliverChangeRecords(observer)");
- }
- CHECK_EQ(1, CompileRun("calls")->Int32Value());
- CHECK_EQ(3, CompileRun("count")->Int32Value());
-}
-
-TEST(EndOfMicrotaskDelivery) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context;
- CompileRun(
- "var obj = {};"
- "var count = 0;"
- "var observer = function(records) { count = records.length };"
- "Object.observe(obj, observer);"
- "obj.foo = 'bar';");
- CHECK_EQ(1, CompileRun("count")->Int32Value());
-}
-
-TEST(DeliveryOrdering) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context;
- CompileRun(
- "var obj1 = {};"
- "var obj2 = {};"
- "var ordering = [];"
- "function observer2() { ordering.push(2); };"
- "function observer1() { ordering.push(1); };"
- "function observer3() { ordering.push(3); };"
- "Object.observe(obj1, observer1);"
- "Object.observe(obj1, observer2);"
- "Object.observe(obj1, observer3);"
- "obj1.foo = 'bar';");
- CHECK_EQ(3, CompileRun("ordering.length")->Int32Value());
- CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
- CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
- CompileRun(
- "ordering = [];"
- "Object.observe(obj2, observer3);"
- "Object.observe(obj2, observer2);"
- "Object.observe(obj2, observer1);"
- "obj2.foo = 'baz'");
- CHECK_EQ(3, CompileRun("ordering.length")->Int32Value());
- CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
- CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
-}
-
-TEST(DeliveryOrderingReentrant) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context;
- CompileRun(
- "var obj = {};"
- "var reentered = false;"
- "var ordering = [];"
- "function observer1() { ordering.push(1); };"
- "function observer2() {"
- " if (!reentered) {"
- " obj.foo = 'baz';"
- " reentered = true;"
- " }"
- " ordering.push(2);"
- "};"
- "function observer3() { ordering.push(3); };"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer2);"
- "Object.observe(obj, observer3);"
- "obj.foo = 'bar';");
- CHECK_EQ(5, CompileRun("ordering.length")->Int32Value());
- CHECK_EQ(1, CompileRun("ordering[0]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[1]")->Int32Value());
- CHECK_EQ(3, CompileRun("ordering[2]")->Int32Value());
- // Note that we re-deliver to observers 1 and 2, while observer3
- // already received the second record during the first round.
- CHECK_EQ(1, CompileRun("ordering[3]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[4]")->Int32Value());
-}
-
-TEST(DeliveryOrderingDeliverChangeRecords) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context;
- CompileRun(
- "var obj = {};"
- "var ordering = [];"
- "function observer1() { ordering.push(1); if (!obj.b) obj.b = true };"
- "function observer2() { ordering.push(2); };"
- "Object.observe(obj, observer1);"
- "Object.observe(obj, observer2);"
- "obj.a = 1;"
- "Object.deliverChangeRecords(observer2);");
- CHECK_EQ(4, CompileRun("ordering.length")->Int32Value());
- // First, observer2 is called due to deliverChangeRecords
- CHECK_EQ(2, CompileRun("ordering[0]")->Int32Value());
- // Then, observer1 is called when the stack unwinds
- CHECK_EQ(1, CompileRun("ordering[1]")->Int32Value());
- // observer1's mutation causes both 1 and 2 to be reactivated,
- // with 1 having priority.
- CHECK_EQ(1, CompileRun("ordering[2]")->Int32Value());
- CHECK_EQ(2, CompileRun("ordering[3]")->Int32Value());
-}
-
-TEST(ObjectHashTableGrowth) {
- HarmonyIsolate isolate;
- HandleScope scope;
- // Initializing this context sets up initial hash tables.
- LocalContext context;
- Handle<Value> obj = CompileRun("obj = {};");
- Handle<Value> observer = CompileRun(
- "var ran = false;"
- "(function() { ran = true })");
- {
- // As does initializing this context.
- LocalContext context2;
- context2->Global()->Set(String::New("obj"), obj);
- context2->Global()->Set(String::New("observer"), observer);
- CompileRun(
- "var objArr = [];"
- // 100 objects should be enough to make the hash table grow
- // (and thus relocate).
- "for (var i = 0; i < 100; ++i) {"
- " objArr.push({});"
- " Object.observe(objArr[objArr.length-1], function(){});"
- "}"
- "Object.observe(obj, observer);");
- }
- // obj is now marked "is_observed", but our map has moved.
- CompileRun("obj.foo = 'bar'");
- CHECK(CompileRun("ran")->BooleanValue());
-}
-
-TEST(GlobalObjectObservation) {
- HarmonyIsolate isolate;
- HandleScope scope;
- LocalContext context;
- Handle<Object> global_proxy = context->Global();
- Handle<Object> inner_global = global_proxy->GetPrototype().As<Object>();
- CompileRun(
- "var records = [];"
- "var global = this;"
- "Object.observe(global, function(r) { [].push.apply(records, r) });"
- "global.foo = 'hello';");
- CHECK_EQ(1, CompileRun("records.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records[0].object")));
-
- // Detached, mutating the proxy has no effect.
- context->DetachGlobal();
- CompileRun("global.bar = 'goodbye';");
- CHECK_EQ(1, CompileRun("records.length")->Int32Value());
-
- // Mutating the global object directly still has an effect...
- CompileRun("this.bar = 'goodbye';");
- CHECK_EQ(2, CompileRun("records.length")->Int32Value());
- CHECK(inner_global->StrictEquals(CompileRun("records[1].object")));
-
- // Reattached, back to global proxy.
- context->ReattachGlobal(global_proxy);
- CompileRun("global.baz = 'again';");
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records[2].object")));
-
- // Attached to a different context, should not leak mutations
- // to the old context.
- context->DetachGlobal();
- {
- LocalContext context2;
- context2->DetachGlobal();
- context2->ReattachGlobal(global_proxy);
- CompileRun(
- "var records2 = [];"
- "Object.observe(this, function(r) { [].push.apply(records2, r) });"
- "this.bat = 'context2';");
- CHECK_EQ(1, CompileRun("records2.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records2[0].object")));
- }
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
-
- // Attaching by passing to Context::New
- {
- // Delegates to Context::New
- LocalContext context3(NULL, Handle<ObjectTemplate>(), global_proxy);
- CompileRun(
- "var records3 = [];"
- "Object.observe(this, function(r) { [].push.apply(records3, r) });"
- "this.qux = 'context3';");
- CHECK_EQ(1, CompileRun("records3.length")->Int32Value());
- CHECK(global_proxy->StrictEquals(CompileRun("records3[0].object")));
- }
- CHECK_EQ(3, CompileRun("records.length")->Int32Value());
-}
diff --git a/deps/v8/test/cctest/test-parsing.cc b/deps/v8/test/cctest/test-parsing.cc
index ed480cd0d..717c66519 100755
--- a/deps/v8/test/cctest/test-parsing.cc
+++ b/deps/v8/test/cctest/test-parsing.cc
@@ -1041,31 +1041,6 @@ TEST(ScopePositions) {
}
-i::Handle<i::String> FormatMessage(i::ScriptDataImpl* data) {
- i::Handle<i::String> format = v8::Utils::OpenHandle(
- *v8::String::New(data->BuildMessage()));
- i::Vector<const char*> args = data->BuildArgs();
- i::Handle<i::JSArray> args_array = FACTORY->NewJSArray(args.length());
- for (int i = 0; i < args.length(); i++) {
- i::JSArray::SetElement(args_array,
- i,
- v8::Utils::OpenHandle(*v8::String::New(args[i])),
- NONE,
- i::kNonStrictMode);
- }
- i::Handle<i::JSObject> builtins(i::Isolate::Current()->js_builtins_object());
- i::Handle<i::Object> format_fun =
- i::GetProperty(builtins, "FormatMessage");
- i::Handle<i::Object> arg_handles[] = { format, args_array };
- bool has_exception = false;
- i::Handle<i::Object> result =
- i::Execution::Call(format_fun, builtins, 2, arg_handles, &has_exception);
- CHECK(!has_exception);
- CHECK(result->IsString());
- return i::Handle<i::String>::cast(result);
-}
-
-
void TestParserSync(i::Handle<i::String> source, int flags) {
uintptr_t stack_limit = i::Isolate::Current()->stack_guard()->real_climit();
bool harmony_scoping = ((i::kLanguageModeMask & flags) == i::EXTENDED_MODE);
@@ -1092,50 +1067,53 @@ void TestParserSync(i::Handle<i::String> source, int flags) {
i::FunctionLiteral* function = parser.ParseProgram();
i::FLAG_harmony_scoping = save_harmony_scoping;
- // Check that preparsing fails iff parsing fails.
+ i::String* type_string = NULL;
if (function == NULL) {
// Extract exception from the parser.
+ i::Handle<i::String> type_symbol = FACTORY->LookupAsciiSymbol("type");
CHECK(i::Isolate::Current()->has_pending_exception());
i::MaybeObject* maybe_object = i::Isolate::Current()->pending_exception();
i::JSObject* exception = NULL;
CHECK(maybe_object->To(&exception));
- i::Handle<i::JSObject> exception_handle(exception);
- i::Handle<i::String> message_string =
- i::Handle<i::String>::cast(i::GetProperty(exception_handle, "message"));
- if (!data.has_error()) {
- i::OS::Print(
- "Parser failed on:\n"
- "\t%s\n"
- "with error:\n"
- "\t%s\n"
- "However, the preparser succeeded",
- *source->ToCString(), *message_string->ToCString());
- CHECK(false);
- }
- // Check that preparser and parser produce the same error.
- i::Handle<i::String> preparser_message = FormatMessage(&data);
- if (!message_string->Equals(*preparser_message)) {
+ // Get the type string.
+ maybe_object = exception->GetProperty(*type_symbol);
+ CHECK(maybe_object->To(&type_string));
+ }
+
+ // Check that preparsing fails iff parsing fails.
+ if (data.has_error() && function != NULL) {
+ i::OS::Print(
+ "Preparser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the parser succeeded",
+ *source->ToCString(), data.BuildMessage());
+ CHECK(false);
+ } else if (!data.has_error() && function == NULL) {
+ i::OS::Print(
+ "Parser failed on:\n"
+ "\t%s\n"
+ "with error:\n"
+ "\t%s\n"
+ "However, the preparser succeeded",
+ *source->ToCString(), *type_string->ToCString());
+ CHECK(false);
+ }
+
+ // Check that preparser and parser produce the same error.
+ if (function == NULL) {
+ if (!type_string->IsEqualTo(i::CStrVector(data.BuildMessage()))) {
i::OS::Print(
"Expected parser and preparser to produce the same error on:\n"
"\t%s\n"
"However, found the following error messages\n"
"\tparser: %s\n"
"\tpreparser: %s\n",
- *source->ToCString(),
- *message_string->ToCString(),
- *preparser_message->ToCString());
+ *source->ToCString(), *type_string->ToCString(), data.BuildMessage());
CHECK(false);
}
- } else if (data.has_error()) {
- i::OS::Print(
- "Preparser failed on:\n"
- "\t%s\n"
- "with error:\n"
- "\t%s\n"
- "However, the parser succeeded",
- *source->ToCString(), *FormatMessage(&data)->ToCString());
- CHECK(false);
}
}
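// After the revert, parser/preparser agreement is keyed on the exception's
// internal "type" property (a message identifier such as
// "called_non_callable") rather than on the formatted "message" string.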
diff --git a/deps/v8/test/cctest/test-regexp.cc b/deps/v8/test/cctest/test-regexp.cc
index 726108d27..e433b925e 100644
--- a/deps/v8/test/cctest/test-regexp.cc
+++ b/deps/v8/test/cctest/test-regexp.cc
@@ -759,7 +759,7 @@ TEST(MacroAssemblerNativeSuccess) {
int captures[4] = {42, 37, 87, 117};
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
const byte* start_adr =
reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
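// Every hunk in this file is the same mechanical revert: V8 3.14.x names the
// sequential one-byte string type SeqAsciiString, which later releases
// renamed to SeqOneByteString.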
@@ -805,7 +805,7 @@ TEST(MacroAssemblerNativeSimple) {
int captures[4] = {42, 37, 87, 117};
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -823,7 +823,7 @@ TEST(MacroAssemblerNativeSimple) {
CHECK_EQ(-1, captures[3]);
input = factory->NewStringFromAscii(CStrVector("barbarbar"));
- seq_input = Handle<SeqOneByteString>::cast(input);
+ seq_input = Handle<SeqAsciiString>::cast(input);
start_adr = seq_input->GetCharsAddress();
result = Execute(*code,
@@ -924,7 +924,7 @@ TEST(MacroAssemblerNativeBacktrack) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("foofoo"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -967,7 +967,7 @@ TEST(MacroAssemblerNativeBackReferenceASCII) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("fooofo"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
@@ -1072,7 +1072,7 @@ TEST(MacroAssemblernativeAtStart) {
Handle<Code> code = Handle<Code>::cast(code_object);
Handle<String> input = factory->NewStringFromAscii(CStrVector("foobar"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -1133,7 +1133,7 @@ TEST(MacroAssemblerNativeBackRefNoCase) {
Handle<String> input =
factory->NewStringFromAscii(CStrVector("aBcAbCABCxYzab"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[4];
@@ -1234,7 +1234,7 @@ TEST(MacroAssemblerNativeRegisters) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("foofoofoofoofoo"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int output[6];
@@ -1278,7 +1278,7 @@ TEST(MacroAssemblerStackOverflow) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("dummy"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
NativeRegExpMacroAssembler::Result result =
@@ -1325,7 +1325,7 @@ TEST(MacroAssemblerNativeLotsOfRegisters) {
// String long enough for test (content doesn't matter).
Handle<String> input =
factory->NewStringFromAscii(CStrVector("sample text"));
- Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
+ Handle<SeqAsciiString> seq_input = Handle<SeqAsciiString>::cast(input);
Address start_adr = seq_input->GetCharsAddress();
int captures[2];
diff --git a/deps/v8/test/mjsunit/array-bounds-check-removal.js b/deps/v8/test/mjsunit/array-bounds-check-removal.js
index 7a7cb304d..df7988bda 100644
--- a/deps/v8/test/mjsunit/array-bounds-check-removal.js
+++ b/deps/v8/test/mjsunit/array-bounds-check-removal.js
@@ -178,29 +178,5 @@ short_test(a, 0);
assertTrue(%GetOptimizationStatus(short_test) != 1);
-// A test for when we would modify a phi index.
-var data_phi = [0, 1, 2, 3, 4, 5, 6, 7, 8];
-function test_phi(a, base, check) {
- var index;
- if (check) {
- index = base + 1;
- } else {
- index = base + 2;
- }
- var result = a[index];
- result += a[index + 1];
- result += a[index - 1];
- return result;
-}
-var result_phi = 0;
-result_phi = test_phi(data_phi, 3, true);
-assertEquals(12, result_phi);
-result_phi = test_phi(data_phi, 3, true);
-assertEquals(12, result_phi);
-%OptimizeFunctionOnNextCall(test_phi);
-result_phi = test_phi(data_phi, 3, true);
-assertEquals(12, result_phi);
-
-
gc();
diff --git a/deps/v8/test/mjsunit/array-natives-elements.js b/deps/v8/test/mjsunit/array-natives-elements.js
deleted file mode 100644
index 96a8cb5d1..000000000
--- a/deps/v8/test/mjsunit/array-natives-elements.js
+++ /dev/null
@@ -1,307 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --smi-only-arrays
-// Flags: --noparallel-recompilation
-
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile time
-// sticks if built with snapshot. If --smi-only-arrays is deactivated by
-// default, only a no-snapshot build actually has smi-only arrays enabled in
-// this test case. Depending on whether smi-only arrays are actually enabled,
-// this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
-
-if (support_smi_only_arrays) {
- print("Tests include smi-only arrays.");
-} else {
- print("Tests do NOT include smi-only arrays.");
-}
-
-// IC and Crankshaft support for smi-only elements in dynamic array literals.
-function get(foo) { return foo; } // Used to generate dynamic values.
-
-function array_natives_test() {
-
- // Ensure small array literals start in specific element kind mode.
- assertTrue(%HasFastSmiElements([]));
- assertTrue(%HasFastSmiElements([1]));
- assertTrue(%HasFastSmiElements([1,2]));
- assertTrue(%HasFastDoubleElements([1.1]));
- assertTrue(%HasFastDoubleElements([1.1,2]));
-
- // Push
- var a0 = [1, 2, 3];
- assertTrue(%HasFastSmiElements(a0));
- a0.push(4);
- assertTrue(%HasFastSmiElements(a0));
- a0.push(1.3);
- assertTrue(%HasFastDoubleElements(a0));
- a0.push(1.5);
- assertTrue(%HasFastDoubleElements(a0));
- a0.push({});
- assertTrue(%HasFastObjectElements(a0));
- a0.push({});
- assertTrue(%HasFastObjectElements(a0));
- assertEquals([1,2,3,4,1.3,1.5,{},{}], a0);
-
- // Concat
- var a1;
- a1 = [1,2,3].concat([]);
- assertTrue(%HasFastSmiElements(a1));
- assertEquals([1,2,3], a1);
- a1 = [1,2,3].concat([4,5,6]);
- assertTrue(%HasFastSmiElements(a1));
- assertEquals([1,2,3,4,5,6], a1);
- a1 = [1,2,3].concat([4,5,6], [7,8,9]);
- assertTrue(%HasFastSmiElements(a1));
- assertEquals([1,2,3,4,5,6,7,8,9], a1);
- a1 = [1.1,2,3].concat([]);
- assertTrue(%HasFastDoubleElements(a1));
- assertEquals([1.1,2,3], a1);
- a1 = [1,2,3].concat([1.1, 2]);
- assertTrue(%HasFastDoubleElements(a1));
- assertEquals([1,2,3,1.1,2], a1);
- a1 = [1.1,2,3].concat([1, 2]);
- assertTrue(%HasFastDoubleElements(a1));
- assertEquals([1.1,2,3,1,2], a1);
- a1 = [1.1,2,3].concat([1.2, 2]);
- assertTrue(%HasFastDoubleElements(a1));
- assertEquals([1.1,2,3,1.2,2], a1);
-
- a1 = [1,2,3].concat([{}]);
- assertTrue(%HasFastObjectElements(a1));
- assertEquals([1,2,3,{}], a1);
- a1 = [1.1,2,3].concat([{}]);
- assertTrue(%HasFastObjectElements(a1));
- assertEquals([1.1,2,3,{}], a1);
- a1 = [{}].concat([1,2,3]);
- assertTrue(%HasFastObjectElements(a1));
- assertEquals([{},1,2,3], a1);
- a1 = [{}].concat([1.1,2,3]);
- assertTrue(%HasFastObjectElements(a1));
- assertEquals([{},1.1,2,3], a1);
-
- // Slice
- var a2 = [1,2,3];
- assertTrue(%HasFastSmiElements(a2.slice()));
- assertTrue(%HasFastSmiElements(a2.slice(1)));
- assertTrue(%HasFastSmiElements(a2.slice(1, 2)));
- assertEquals([1,2,3], a2.slice());
- assertEquals([2,3], a2.slice(1));
- assertEquals([2], a2.slice(1,2));
- a2 = [1.1,2,3];
- assertTrue(%HasFastDoubleElements(a2.slice()));
- assertTrue(%HasFastDoubleElements(a2.slice(1)));
- assertTrue(%HasFastDoubleElements(a2.slice(1, 2)));
- assertEquals([1.1,2,3], a2.slice());
- assertEquals([2,3], a2.slice(1));
- assertEquals([2], a2.slice(1,2));
- a2 = [{},2,3];
- assertTrue(%HasFastObjectElements(a2.slice()));
- assertTrue(%HasFastObjectElements(a2.slice(1)));
- assertTrue(%HasFastObjectElements(a2.slice(1, 2)));
- assertEquals([{},2,3], a2.slice());
- assertEquals([2,3], a2.slice(1));
- assertEquals([2], a2.slice(1,2));
-
- // Splice
- var a3 = [1,2,3];
- var a3r;
- a3r = a3.splice(0, 0);
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastSmiElements(a3));
- assertEquals([], a3r);
- assertEquals([1, 2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 1);
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastSmiElements(a3));
- assertEquals([1], a3r);
- assertEquals([2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 0, 2);
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastSmiElements(a3));
- assertEquals([], a3r);
- assertEquals([2, 1, 2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 1, 2);
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastSmiElements(a3));
- assertEquals([1], a3r);
- assertEquals([2, 2, 3], a3);
-
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 0);
- assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([], a3r);
- assertEquals([1.1, 2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 1);
- assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([1.1], a3r);
- assertEquals([2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 0, 2);
- // Commented out since this path is handled in JS, which picks the best-fit elements kind.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([], a3r);
- assertEquals([2, 1.1, 2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 1, 2);
- assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([1.1], a3r);
- assertEquals([2, 2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 0, 2.1);
- // Commented out since this path is handled in JS, which picks the best-fit elements kind.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([], a3r);
- assertEquals([2.1, 1.1, 2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 1, 2.2);
- assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([1.1], a3r);
- assertEquals([2.2, 2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 0, 2.1);
- // Commented out since this path is handled in JS, which picks the best-fit elements kind.
- // assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastSmiElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([], a3r);
- assertEquals([2.1, 1, 2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 1, 2.2);
- assertTrue(%HasFastDoubleElements(a3r));
- assertTrue(%HasFastDoubleElements(a3));
- assertEquals([1], a3r);
- assertEquals([2.2, 2, 3], a3);
-
- a3 = [{},2,3];
- a3r = a3.splice(0, 0);
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([], a3r);
- assertEquals([{}, 2, 3], a3);
- a3 = [1,2,{}];
- a3r = a3.splice(0, 1);
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([1], a3r);
- assertEquals([2, {}], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 0, {});
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([], a3r);
- assertEquals([{}, 1, 2, 3], a3);
- a3 = [1,2,3];
- a3r = a3.splice(0, 1, {});
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([1], a3r);
- assertEquals([{}, 2, 3], a3);
-
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 0, {});
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([], a3r);
- assertEquals([{}, 1.1, 2, 3], a3);
- a3 = [1.1,2,3];
- a3r = a3.splice(0, 1, {});
- assertTrue(%HasFastObjectElements(a3r));
- assertTrue(%HasFastObjectElements(a3));
- assertEquals([1.1], a3r);
- assertEquals([{}, 2, 3], a3);
-
- // Pop
- var a4 = [1,2,3];
- assertEquals(3, a4.pop());
- assertTrue(%HasFastSmiElements(a4));
- a4 = [1.1,2,3];
- assertEquals(3, a4.pop());
- assertTrue(%HasFastDoubleElements(a4));
- a4 = [{},2,3];
- assertEquals(3, a4.pop());
- assertTrue(%HasFastObjectElements(a4));
-
- // Shift
- var a4 = [1,2,3];
- assertEquals(1, a4.shift());
- assertTrue(%HasFastSmiElements(a4));
- a4 = [1.1,2,3];
- assertEquals(1.1, a4.shift());
- assertTrue(%HasFastDoubleElements(a4));
- a4 = [{},2,3];
- assertEquals({}, a4.shift());
- assertTrue(%HasFastObjectElements(a4));
-
- // Unshift
- var a4 = [1,2,3];
- a4.unshift(1);
- assertTrue(%HasFastSmiElements(a4));
- assertEquals([1,1,2,3], a4);
- a4 = [1,2,3];
- a4.unshift(1.1);
- // TODO(verwaest): We'll want to support double unshifting as well.
- // assertTrue(%HasFastDoubleElements(a4));
- assertTrue(%HasFastObjectElements(a4));
- assertEquals([1.1,1,2,3], a4);
- a4 = [1.1,2,3];
- a4.unshift(1);
- // assertTrue(%HasFastDoubleElements(a4));
- assertTrue(%HasFastObjectElements(a4));
- assertEquals([1,1.1,2,3], a4);
- a4 = [{},2,3];
- a4.unshift(1);
- assertTrue(%HasFastObjectElements(a4));
- assertEquals([1,{},2,3], a4);
- a4 = [{},2,3];
- a4.unshift(1.1);
- assertTrue(%HasFastObjectElements(a4));
- assertEquals([1.1,{},2,3], a4);
-}
-
-if (support_smi_only_arrays) {
- for (var i = 0; i < 3; i++) {
- array_natives_test();
- }
- %OptimizeFunctionOnNextCall(array_natives_test);
- array_natives_test();
-}
diff --git a/deps/v8/test/mjsunit/array-reduce.js b/deps/v8/test/mjsunit/array-reduce.js
index 429f34808..1e9618826 100755
--- a/deps/v8/test/mjsunit/array-reduce.js
+++ b/deps/v8/test/mjsunit/array-reduce.js
@@ -418,8 +418,8 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce callback not a function not throwing TypeError");
- assertTrue(e.message.indexOf(" is not a function") >= 0,
- "reduce non function TypeError type");
+ assertEquals("called_non_callable", e.type,
+ "reduce non function TypeError type");
}
assertTrue(exception);
@@ -430,8 +430,8 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight callback not a function not throwing TypeError");
- assertTrue(e.message.indexOf(" is not a function") >= 0,
- "reduceRight non function TypeError type");
+ assertEquals("called_non_callable", e.type,
+ "reduceRight non function TypeError type");
}
assertTrue(exception);
@@ -442,7 +442,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce no initial value not throwing TypeError");
- assertEquals("Reduce of empty array with no initial value", e.message,
+ assertEquals("reduce_no_initial", e.type,
"reduce no initial TypeError type");
}
assertTrue(exception);
@@ -454,7 +454,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight no initial value not throwing TypeError");
- assertEquals("Reduce of empty array with no initial value", e.message,
+ assertEquals("reduce_no_initial", e.type,
"reduceRight no initial TypeError type");
}
assertTrue(exception);
@@ -466,7 +466,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduce sparse no initial value not throwing TypeError");
- assertEquals("Reduce of empty array with no initial value", e.message,
+ assertEquals("reduce_no_initial", e.type,
"reduce no initial TypeError type");
}
assertTrue(exception);
@@ -478,7 +478,7 @@ try {
exception = true;
assertTrue(e instanceof TypeError,
"reduceRight sparse no initial value not throwing TypeError");
- assertEquals("Reduce of empty array with no initial value", e.message,
+ assertEquals("reduce_no_initial", e.type,
"reduceRight no initial TypeError type");
}
assertTrue(exception);
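// e.type here is the V8-internal message identifier that 3.14.x attaches to
// built-in errors; the e.message substring checks replaced above belong to
// the newer message-formatting scheme.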
diff --git a/deps/v8/test/mjsunit/array-slice.js b/deps/v8/test/mjsunit/array-slice.js
index ae0e3bc1e..5ae31dc52 100644
--- a/deps/v8/test/mjsunit/array-slice.js
+++ b/deps/v8/test/mjsunit/array-slice.js
@@ -290,15 +290,3 @@
func('a', 'b', 'c');
})();
-
-// Check slicing of holey objects with elements in the prototype
-(function() {
- function f() {
- delete arguments[1];
- arguments.__proto__[1] = 5;
- var result = Array.prototype.slice.call(arguments);
- delete arguments.__proto__[1];
- assertEquals([1,5,3], result);
- }
- f(1,2,3);
-})();
diff --git a/deps/v8/test/mjsunit/array-store-and-grow.js b/deps/v8/test/mjsunit/array-store-and-grow.js
index 88f3db8f6..131d4ebc5 100644
--- a/deps/v8/test/mjsunit/array-store-and-grow.js
+++ b/deps/v8/test/mjsunit/array-store-and-grow.js
@@ -99,10 +99,7 @@ array_store_5(a, 1, 0.5);
a = makeCOW();
array_store_5(a, 1, 0.5);
assertEquals(0.5, a[1]);
-a = [];
-assertEquals(0.5, array_store_5(a, 1, 0.5));
-assertEquals(undefined, a[0]);
-assertEquals(0.5, a[1]);
+assertEquals(0.5, array_store_5([], 1, 0.5));
function array_store_6(a,b,c) {
return (a[b] = c);
diff --git a/deps/v8/test/mjsunit/compiler/multiply-add.js b/deps/v8/test/mjsunit/compiler/multiply-add.js
deleted file mode 100644
index 2b4304e84..000000000
--- a/deps/v8/test/mjsunit/compiler/multiply-add.js
+++ /dev/null
@@ -1,69 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-// Test expressions that can be computed with a multiply-add instruction.
-
-function f(a, b, c) {
- return a * b + c;
-}
-
-function g(a, b, c) {
- return a + b * c;
-}
-
-function h(a, b, c, d) {
- return a * b + c * d;
-}
-
-assertEquals(5, f(1, 2, 3));
-assertEquals(5, f(1, 2, 3));
-%OptimizeFunctionOnNextCall(f);
-assertEquals(5, f(1, 2, 3));
-assertEquals("2foo", f(1, 2, "foo"));
-assertEquals(5.41, f(1.1, 2.1, 3.1));
-assertEquals(5.41, f(1.1, 2.1, 3.1));
-%OptimizeFunctionOnNextCall(f);
-assertEquals(5.41, f(1.1, 2.1, 3.1));
-
-assertEquals(7, g(1, 2, 3));
-assertEquals(7, g(1, 2, 3));
-%OptimizeFunctionOnNextCall(g);
-assertEquals(7, g(1, 2, 3));
-assertEquals(8.36, g(1.1, 2.2, 3.3));
-assertEquals(8.36, g(1.1, 2.2, 3.3));
-%OptimizeFunctionOnNextCall(g);
-assertEquals(8.36, g(1.1, 2.2, 3.3));
-
-assertEquals(14, h(1, 2, 3, 4));
-assertEquals(14, h(1, 2, 3, 4));
-%OptimizeFunctionOnNextCall(h);
-assertEquals(14, h(1, 2, 3, 4));
-assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
-assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
-%OptimizeFunctionOnNextCall(h);
-assertEquals(15.02, h(1.1, 2.1, 3.1, 4.1));
diff --git a/deps/v8/test/mjsunit/compiler/proto-chain-load.js b/deps/v8/test/mjsunit/compiler/proto-chain-load.js
deleted file mode 100644
index 60c6431d2..000000000
--- a/deps/v8/test/mjsunit/compiler/proto-chain-load.js
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// Test HLoadNamedField on the proto chain.
-
-var obj4 = Object.create(null, { f4: {value: 4} });
-var obj3 = Object.create(obj4, { f3: {value: 3} });
-var obj2 = Object.create(obj3, { f2: {value: 2} });
-var obj1 = Object.create(obj2, { f1: {value: 1} });
-var obj0 = Object.create(obj1, { f0: {value: 0} });
-
-function get4(obj) { return obj.f4; }
-
-assertEquals(4, get4(obj0));
-assertEquals(4, get4(obj0));
-%OptimizeFunctionOnNextCall(get4);
-assertEquals(4, get4(obj0));
-assertEquals(4, get4(obj0));
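
HLoadNamedField is Crankshaft's instruction for loading a named field at a
fixed offset; the deleted test checks it stays correct when the field lives
several links up the prototype chain. The chain construction itself is plain
JavaScript; a reduced sketch (base/mid/leaf are illustrative names):

    var base = Object.create(null, { f: { value: 42 } });
    var mid  = Object.create(base);
    var leaf = Object.create(mid);
    // The lookup walks leaf -> mid -> base and resolves f on the root.
    leaf.f; // 42
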
diff --git a/deps/v8/test/mjsunit/compiler/rotate.js b/deps/v8/test/mjsunit/compiler/rotate.js
deleted file mode 100644
index 14fe9da3e..000000000
--- a/deps/v8/test/mjsunit/compiler/rotate.js
+++ /dev/null
@@ -1,224 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --expose-gc
-
-// Test shift operations that can be replaced by rotate operation.
-
-function SideEffect() {
- with ({}) { } // not inlinable
-}
-
-function Twenty() {
- SideEffect();
- return 20;
-}
-
-function Twelve() {
- SideEffect();
- return 12;
-}
-
-
-function ROR(x, sa) {
- return (x >>> sa) | (x << (32 - sa));
-}
-
-function ROR1(x, sa) {
- return (x >>> sa) | (x << (32 - sa));
-}
-
-function ROR2(x, sa) {
- return (x >>> (32 - sa)) | (x << (sa));
-}
-
-function ROR3(x, sa) {
- return (x << (32 - sa)) | (x >>> sa);
-}
-
-function ROR4(x, sa) {
- return (x << (sa)) | (x >>> (32 - sa));
-}
-
-assertEquals(1 << ((2 % 32)), ROR(1, 30));
-assertEquals(1 << ((2 % 32)), ROR(1, 30));
-%OptimizeFunctionOnNextCall(ROR);
-assertEquals(1 << ((2 % 32)), ROR(1, 30));
-
-assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
-assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
-%OptimizeFunctionOnNextCall(ROR1);
-assertEquals(0xF0000FFF | 0, ROR1(0x0000FFFF, 4));
-
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
-%OptimizeFunctionOnNextCall(ROR1);
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, 20));
-
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
-%OptimizeFunctionOnNextCall(ROR1);
-assertEquals(0x0FFFF000 | 0, ROR1(0x0000FFFF, Twenty()));
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(0xFFFFFFFF | 0, ROR1(0xFFFFFFFF, i));
- assertEquals(0xFFFFFFFF | 0, ROR1(0xFFFFFFFF, i));
- %OptimizeFunctionOnNextCall(ROR1);
- assertEquals(0xFFFFFFFF | 0, ROR1(0xFFFFFFFF, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(-1, ROR1(-1, i));
- assertEquals(-1, ROR1(-1, i));
- %OptimizeFunctionOnNextCall(ROR1);
- assertEquals(-1, ROR1(-1, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(1 << (32 - (i % 32)), ROR1(1, i));
- assertEquals(1 << (32 - (i % 32)), ROR1(1, i));
- %OptimizeFunctionOnNextCall(ROR1);
- assertEquals(1 << (32 - (i % 32)), ROR1(1, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(1 << (32 - (i % 32)), ROR1(1.4, i));
- assertEquals(1 << (32 - (i % 32)), ROR1(1.4, i));
- %OptimizeFunctionOnNextCall(ROR1);
- assertEquals(1 << (32 - (i % 32)), ROR1(1.4, i));
-}
-
-
-
-assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
-assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
-%OptimizeFunctionOnNextCall(ROR2);
-assertEquals(0xF0000FFF | 0, ROR2(0x0000FFFF, 28));
-
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
-%OptimizeFunctionOnNextCall(ROR2);
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, 12));
-
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
-%OptimizeFunctionOnNextCall(ROR2);
-assertEquals(0x0FFFF000 | 0, ROR2(0x0000FFFF, Twelve()));
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(0xFFFFFFFF | 0, ROR2(0xFFFFFFFF, i));
- assertEquals(0xFFFFFFFF | 0, ROR2(0xFFFFFFFF, i));
- %OptimizeFunctionOnNextCall(ROR2);
- assertEquals(0xFFFFFFFF | 0, ROR2(0xFFFFFFFF, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(-1, ROR2(-1, i));
- assertEquals(-1, ROR2(-1, i));
- %OptimizeFunctionOnNextCall(ROR2);
- assertEquals(-1, ROR2(-1, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(1 << ((i % 32)), ROR2(1, i));
- assertEquals(1 << ((i % 32)), ROR2(1, i));
- %OptimizeFunctionOnNextCall(ROR2);
- assertEquals(1 << ((i % 32)), ROR2(1, i));
-}
-
-assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
-assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
-%OptimizeFunctionOnNextCall(ROR3);
-assertEquals(0xF0000FFF | 0, ROR3(0x0000FFFF, 4));
-
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
-%OptimizeFunctionOnNextCall(ROR3);
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, 20));
-
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
-%OptimizeFunctionOnNextCall(ROR3);
-assertEquals(0x0FFFF000 | 0, ROR3(0x0000FFFF, Twenty()));
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(0xFFFFFFFF | 0, ROR3(0xFFFFFFFF, i));
- assertEquals(0xFFFFFFFF | 0, ROR3(0xFFFFFFFF, i));
- %OptimizeFunctionOnNextCall(ROR3);
- assertEquals(0xFFFFFFFF | 0, ROR3(0xFFFFFFFF, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(-1, ROR3(-1, i));
- assertEquals(-1, ROR3(-1, i));
- %OptimizeFunctionOnNextCall(ROR3);
- assertEquals(-1, ROR3(-1, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
- assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
- %OptimizeFunctionOnNextCall(ROR3);
- assertEquals(1 << (32 - (i % 32)), ROR3(1, i));
-}
-
-assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
-assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
-%OptimizeFunctionOnNextCall(ROR4);
-assertEquals(0xF0000FFF | 0, ROR4(0x0000FFFF, 28));
-
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
-%OptimizeFunctionOnNextCall(ROR4);
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, 12));
-
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
-%OptimizeFunctionOnNextCall(ROR4);
-assertEquals(0x0FFFF000 | 0, ROR4(0x0000FFFF, Twelve()));
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(0xFFFFFFFF | 0, ROR4(0xFFFFFFFF, i));
- assertEquals(0xFFFFFFFF | 0, ROR4(0xFFFFFFFF, i));
- %OptimizeFunctionOnNextCall(ROR4);
- assertEquals(0xFFFFFFFF | 0, ROR4(0xFFFFFFFF, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(-1, ROR4(-1, i));
- assertEquals(-1, ROR4(-1, i));
- %OptimizeFunctionOnNextCall(ROR4);
- assertEquals(-1, ROR4(-1, i));
-}
-
-for (var i = 0; i <= 100; i++) {
- assertEquals(1 << ((i % 32)), ROR4(1, i));
- assertEquals(1 << ((i % 32)), ROR4(1, i));
- %OptimizeFunctionOnNextCall(ROR4);
- assertEquals(1 << ((i % 32)), ROR4(1, i));
-}
-
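
All four ROR variants above are spellings of the same 32-bit rotation
identity, which the optimizing compiler can pattern-match into one rotate
instruction. A self-contained sketch of the identity in plain JavaScript
(rotr32 is an illustrative name; JavaScript shift counts are implicitly
masked with & 31, which the loops above rely on once i reaches 32):

    function rotr32(x, sa) {
      sa &= 31; // explicit for clarity; JS shifts mask the count anyway
      return ((x >>> sa) | (x << (32 - sa))) | 0;
    }
    rotr32(0x0000FFFF, 4);  // 0xF0000FFF | 0, as in the ROR1 cases above
    rotr32(0x0000FFFF, 20); // 0x0FFFF000 | 0, as in the ROR1 cases above
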
diff --git a/deps/v8/test/mjsunit/debug-liveedit-compile-error.js b/deps/v8/test/mjsunit/debug-liveedit-compile-error.js
deleted file mode 100644
index 2fd6aedab..000000000
--- a/deps/v8/test/mjsunit/debug-liveedit-compile-error.js
+++ /dev/null
@@ -1,60 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-
-Debug = debug.Debug
-
-eval("var something1 = 25; \n"
- + " function ChooseAnimal() { return 'Cat'; } \n"
- + " ChooseAnimal.Helper = function() { return 'Help!'; }\n");
-
-assertEquals("Cat", ChooseAnimal());
-
-var script = Debug.findScript(ChooseAnimal);
-
-var orig_animal = "Cat";
-var patch_pos = script.source.indexOf(orig_animal);
-var new_animal_patch = "Cap' + ) + 'bara";
-
-var change_log = new Array();
-var caught_exception = null;
-try {
- Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
- orig_animal.length, new_animal_patch, change_log);
-} catch (e) {
- caught_exception = e;
-}
-
-assertNotNull(caught_exception);
-assertEquals("Unexpected token )",
- caught_exception.details.syntaxErrorMessage);
-
-assertEquals(2, caught_exception.details.position.start.line);
-
-
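
For reference, the patch above splices "Cap' + ) + 'bara" over the "Cat" in

    function ChooseAnimal() { return 'Cat'; }

producing

    function ChooseAnimal() { return 'Cap' + ) + 'bara'; }

which fails to parse with the "Unexpected token )" error the assertions
expect LiveEdit to report instead of applying the edit.
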
diff --git a/deps/v8/test/mjsunit/debug-liveedit-literals.js b/deps/v8/test/mjsunit/debug-liveedit-literals.js
deleted file mode 100644
index 5f9217e83..000000000
--- a/deps/v8/test/mjsunit/debug-liveedit-literals.js
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2010 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-// Get the Debug object exposed from the debug context global object.
-
-Debug = debug.Debug
-
-function Test(old_expression, new_expression) {
- // Generate several instances of the function to test that we correctly
- // fix all functions in memory.
- var function_instance_number = 11;
- eval("var t1 =1;\n" +
- "ChooseAnimalArray = [];\n" +
- "for (var i = 0; i < function_instance_number; i++) {\n" +
- " ChooseAnimalArray.push(\n" +
- " function ChooseAnimal() {\n" +
- " return " + old_expression + ";\n" +
- " });\n" +
- "}\n" +
- "var t2 =1;\n");
-
- for (var i = 0; i < ChooseAnimalArray.length; i++) {
- assertEquals("Cat", ChooseAnimalArray[i]());
- }
-
- var script = Debug.findScript(ChooseAnimalArray[0]);
-
- var patch_pos = script.source.indexOf(old_expression);
- var new_animal_patch = new_expression;
-
- var change_log = new Array();
- Debug.LiveEdit.TestApi.ApplySingleChunkPatch(script, patch_pos,
- old_expression.length, new_expression, change_log);
-
- for (var i = 0; i < ChooseAnimalArray.length; i++) {
- assertEquals("Capybara", ChooseAnimalArray[i]());
- }
-}
-
-// Check that old literal boilerplate was reset.
-Test("['Cat'][0]", "['Capybara'][0]");
-Test("['Cat'][0]", "{a:'Capybara'}.a");
-
-// No literals -> 1 literal.
-Test("'Cat'", "['Capybara'][0]");
-
-// No literals -> 2 literals.
-Test("'Cat'", "['Capy'][0] + {a:'bara'}.a");
-
-// 1 literal -> no literals.
-Test("['Cat'][0]", "'Capybara'");
-
-// 2 literals -> no literals.
-Test("['Ca'][0] + {a:'t'}.a", "'Capybara'");
-
-// No literals -> regexp.
-Test("'Cat'", "(/.A.Y.A.A/i).exec('Capybara')[0]");
-
-// Array literal -> regexp.
-Test("['Cat'][0]", "(/.A.Y.A.A/i).exec('Capybara')[0]");
-
-// Regexp -> object literal.
-Test("(/.A./i).exec('Cat')[0]", "{c:'Capybara'}.c");
-
-// No literals -> regexp.
-Test("'Cat'", "(/.A.Y.A.A/i).exec('Capybara')[0]");
-
-// Regexp -> no literals.
-Test("(/.A./i).exec('Cat')[0]", "'Capybara'");
diff --git a/deps/v8/test/mjsunit/debug-set-variable-value.js b/deps/v8/test/mjsunit/debug-set-variable-value.js
deleted file mode 100644
index dac886145..000000000
--- a/deps/v8/test/mjsunit/debug-set-variable-value.js
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug
-
-// Get the Debug object exposed from the debug context global object.
-var Debug = debug.Debug;
-
-// Accepts a function/closure 'fun' that must contain a debugger statement.
-// A variable 'variable_name' must be initialized before the debugger
-// statement and returned after it. The test alters the variable's value
-// while paused on the debugger statement and checks that the returned value
-// reflects the change.
-function RunPauseTest(scope_number, variable_name, expected_new_value, fun) {
- var old_value = fun();
-
- var listener_delegate;
- var listener_called = false;
- var exception = null;
-
- function listener_delegate(exec_state) {
- var scope = exec_state.frame(0).scope(scope_number);
- scope.setVariableValue(variable_name, expected_new_value);
- }
-
- function listener(event, exec_state, event_data, data) {
- try {
- if (event == Debug.DebugEvent.Break) {
- listener_called = true;
- listener_delegate(exec_state);
- }
- } catch (e) {
- exception = e;
- }
- }
-
- // Add the debug event listener.
- Debug.setListener(listener);
-
- var actual_new_value;
- try {
- actual_new_value = fun();
- } finally {
- Debug.setListener(null);
- }
-
- if (exception != null) {
- assertUnreachable("Exception: " + exception);
- }
- assertTrue(listener_called);
-
- assertTrue(old_value != actual_new_value);
- assertTrue(expected_new_value == actual_new_value);
-}
-
-// Accepts a closure 'fun' that returns a variable from its outer scope.
-// The test changes the variable's value through the function's mirror and
-// checks that the return value changes accordingly.
-function RunClosureTest(scope_number, variable_name, expected_new_value, fun) {
- var old_value = fun();
-
- var fun_mirror = Debug.MakeMirror(fun);
-
- var scope = fun_mirror.scope(scope_number);
- scope.setVariableValue(variable_name, expected_new_value);
-
- var actual_new_value = fun();
-
- assertTrue(old_value != actual_new_value);
- assertTrue(expected_new_value == actual_new_value);
-}
-
-// Test changing variable value when in pause
-RunPauseTest(1, 'v1', 5, (function Factory() {
- var v1 = 'cat';
- return function() {
- debugger;
- return v1;
- }
-})());
-
-RunPauseTest(1, 'v2', 11, (function Factory(v2) {
- return function() {
- debugger;
- return v2;
- }
-})('dog'));
-
-RunPauseTest(3, 'foo', 77, (function Factory() {
- var foo = "capybara";
- return (function() {
- var bar = "fish";
- try {
- throw {name: "test exception"};
- } catch (e) {
- return function() {
- debugger;
- bar = "beast";
- return foo;
- }
- }
- })();
-})());
-
-
-
-// Test changing variable value in closure by handle
-RunClosureTest(0, 'v1', 5, (function Factory() {
- var v1 = 'cat';
- return function() {
- return v1;
- }
-})());
-
-RunClosureTest(0, 'v2', 11, (function Factory(v2) {
- return function() {
- return v2;
- }
-})('dog'));
-
-RunClosureTest(2, 'foo', 77, (function Factory() {
- var foo = "capybara";
- return (function() {
- var bar = "fish";
- try {
- throw {name: "test exception"};
- } catch (e) {
- return function() {
- bar = "beast";
- return foo;
- }
- }
- })();
-})());
-
-
-// Test value description protocol JSON
-assertEquals(true, Debug.TestApi.CommandProcessorResolveValue({value: true}));
-
-assertSame(null, Debug.TestApi.CommandProcessorResolveValue({type: "null"}));
-assertSame(undefined,
- Debug.TestApi.CommandProcessorResolveValue({type: "undefined"}));
-
-assertSame("123", Debug.TestApi.CommandProcessorResolveValue(
- {type: "string", stringDescription: "123"}));
-assertSame(123, Debug.TestApi.CommandProcessorResolveValue(
- {type: "number", stringDescription: "123"}));
-
-assertSame(Number, Debug.TestApi.CommandProcessorResolveValue(
- {handle: Debug.MakeMirror(Number).handle()}));
-assertSame(RunClosureTest, Debug.TestApi.CommandProcessorResolveValue(
- {handle: Debug.MakeMirror(RunClosureTest).handle()}));
-
diff --git a/deps/v8/test/mjsunit/elements-kind.js b/deps/v8/test/mjsunit/elements-kind.js
index cf9c21605..b74a21243 100644
--- a/deps/v8/test/mjsunit/elements-kind.js
+++ b/deps/v8/test/mjsunit/elements-kind.js
@@ -321,7 +321,8 @@ if (support_smi_only_arrays) {
assertKind(elements_kind.fast_double, b);
var c = a.concat(b);
assertEquals([1, 2, 4.5, 5.5], c);
- assertKind(elements_kind.fast_double, c);
+ // TODO(1810): Change implementation so that we get DOUBLE elements here?
+ assertKind(elements_kind.fast, c);
}
// Test that Array.push() correctly handles SMI elements.
diff --git a/deps/v8/test/mjsunit/elements-length-no-holey.js b/deps/v8/test/mjsunit/elements-length-no-holey.js
deleted file mode 100644
index 5bac296e1..000000000
--- a/deps/v8/test/mjsunit/elements-length-no-holey.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-a = [1,2,3];
-a.length = 1;
-assertFalse(%HasFastHoleyElements(a));
-assertTrue(%HasFastSmiElements(a));
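
Shrinking an array by assigning a smaller length truncates the backing
store rather than punching holes, which is why the deleted test expects the
array to remain in the packed (non-holey) SMI elements kind. In standard
JavaScript terms:

    var a = [1, 2, 3];
    a.length = 1;
    // The dropped elements are gone entirely, not replaced by holes:
    0 in a; // true
    1 in a; // false
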
diff --git a/deps/v8/test/mjsunit/error-accessors.js b/deps/v8/test/mjsunit/error-accessors.js
deleted file mode 100644
index 958105024..000000000
--- a/deps/v8/test/mjsunit/error-accessors.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Test that the message property of error objects is a data property.
-
-var o;
-
-// message is constructed using the constructor.
-var error1 = new Error("custom message");
-o = {};
-o.__proto__ = error1;
-
-assertEquals("custom message",
- Object.getOwnPropertyDescriptor(error1, "message").value);
-o.message = "another message";
-assertEquals("another message", o.message);
-assertEquals("custom message", error1.message);
-
-// message is constructed by the runtime.
-var error2;
-try { x.x } catch (e) { error2 = e; }
-o = {};
-o.__proto__ = error2;
-
-assertEquals("x is not defined",
- Object.getOwnPropertyDescriptor(error2, "message").value);
-o.message = "another message";
-assertEquals("another message", o.message);
-assertEquals("x is not defined", error2.message);
-
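
The deleted test relies on message being an own, writable data property of
each error instance: assigning through an object that inherits from the
error shadows it with a new own property instead of invoking a setter.
A reduced sketch in plain JavaScript (err/child are illustrative names):

    var err = new Error("boom");
    var d = Object.getOwnPropertyDescriptor(err, "message");
    // d.value === "boom" and d.get === undefined: a data property.
    var child = Object.create(err);
    child.message = "other"; // creates an own property on child
    err.message;             // still "boom"
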
diff --git a/deps/v8/test/mjsunit/error-constructors.js b/deps/v8/test/mjsunit/error-constructors.js
index 84c6bbfd0..107164df5 100644
--- a/deps/v8/test/mjsunit/error-constructors.js
+++ b/deps/v8/test/mjsunit/error-constructors.js
@@ -36,6 +36,10 @@ assertFalse(desc['enumerable']);
var e = new Error("foobar");
desc = Object.getOwnPropertyDescriptor(e, 'message');
assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(e, 'arguments');
+assertFalse(desc['enumerable']);
+desc = Object.getOwnPropertyDescriptor(e, 'type');
+assertFalse(desc['enumerable']);
desc = Object.getOwnPropertyDescriptor(e, 'stack');
assertFalse(desc['enumerable']);
@@ -53,17 +57,26 @@ for (var v in e) {
function fail() { assertUnreachable(); };
ReferenceError.prototype.__defineSetter__('name', fail);
ReferenceError.prototype.__defineSetter__('message', fail);
+ReferenceError.prototype.__defineSetter__('type', fail);
+ReferenceError.prototype.__defineSetter__('arguments', fail);
ReferenceError.prototype.__defineSetter__('stack', fail);
var e = new ReferenceError();
assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
var e = new ReferenceError('123');
assertTrue(e.hasOwnProperty('message'));
assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
var e = %MakeReferenceError("my_test_error", [0, 1]);
assertTrue(e.hasOwnProperty('stack'));
+assertTrue(e.hasOwnProperty('type'));
+assertTrue(e.hasOwnProperty('arguments'));
+assertEquals("my_test_error", e.type)
// Check that intercepting property access from toString is prevented for
// compiler errors. This is not specified, but allowing interception
@@ -73,7 +86,7 @@ var errors = [SyntaxError, ReferenceError, TypeError];
for (var i in errors) {
var name = errors[i].prototype.toString();
// Monkey-patch prototype.
- var props = ["name", "message", "stack"];
+ var props = ["name", "message", "type", "arguments", "stack"];
for (var j in props) {
errors[i].prototype.__defineGetter__(props[j], fail);
}
diff --git a/deps/v8/test/mjsunit/function-call.js b/deps/v8/test/mjsunit/function-call.js
index 92792ac82..26890ed11 100644
--- a/deps/v8/test/mjsunit/function-call.js
+++ b/deps/v8/test/mjsunit/function-call.js
@@ -67,7 +67,8 @@ var should_throw_on_null_and_undefined =
String.prototype.toLocaleLowerCase,
String.prototype.toUpperCase,
String.prototype.toLocaleUpperCase,
- String.prototype.trim];
+ String.prototype.trim,
+ Number.prototype.toLocaleString];
// Non generic natives do not work on any input other than the specific
// type, but since this change will allow call to be invoked with undefined
@@ -149,11 +150,6 @@ var reducing_functions =
[Array.prototype.reduce,
Array.prototype.reduceRight];
-function checkExpectedMessage(e) {
- assertTrue(e.message.indexOf("called on null or undefined") >= 0 ||
- e.message.indexOf("Cannot convert null to object") >= 0);
-}
-
// Test that all natives using the ToObject call throw the right exception.
for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
// Sanity check that all functions are correct
@@ -170,7 +166,8 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].call(null);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
@@ -179,7 +176,8 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].call(undefined);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
@@ -188,7 +186,8 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].apply(null);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
@@ -197,7 +196,8 @@ for (var i = 0; i < should_throw_on_null_and_undefined.length; i++) {
should_throw_on_null_and_undefined[i].apply(undefined);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
}
@@ -257,7 +257,8 @@ for (var j = 0; j < mapping_functions.length; j++) {
null);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
@@ -268,7 +269,8 @@ for (var j = 0; j < mapping_functions.length; j++) {
undefined);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
}
@@ -309,7 +311,8 @@ for (var j = 0; j < reducing_functions.length; j++) {
reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
@@ -318,7 +321,8 @@ for (var j = 0; j < reducing_functions.length; j++) {
reducing_functions[j].call(array, should_throw_on_null_and_undefined[i]);
} catch (e) {
exception = true;
- checkExpectedMessage(e);
+ assertTrue("called_on_null_or_undefined" == e.type ||
+ "null_to_object" == e.type);
}
assertTrue(exception);
}
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part1.js b/deps/v8/test/mjsunit/fuzz-natives-part1.js
index 87f7d0d76..6941d806c 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part1.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part1.js
@@ -147,7 +147,6 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
- "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -196,13 +195,7 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true,
- "_OneByteSeqStringSetChar": true,
- "_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part2.js b/deps/v8/test/mjsunit/fuzz-natives-part2.js
index 2faad1dca..ea8a2cfe1 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part2.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part2.js
@@ -147,7 +147,6 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
- "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -196,13 +195,7 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true,
- "_OneByteSeqStringSetChar": true,
- "_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part3.js b/deps/v8/test/mjsunit/fuzz-natives-part3.js
index ed71d332a..ecfdf9737 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part3.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part3.js
@@ -147,7 +147,6 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
- "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -196,13 +195,7 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true,
- "_OneByteSeqStringSetChar": true,
- "_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/fuzz-natives-part4.js b/deps/v8/test/mjsunit/fuzz-natives-part4.js
index 1b128d594..da045963f 100644
--- a/deps/v8/test/mjsunit/fuzz-natives-part4.js
+++ b/deps/v8/test/mjsunit/fuzz-natives-part4.js
@@ -147,7 +147,6 @@ var knownProblems = {
"PushWithContext": true,
"PushCatchContext": true,
"PushBlockContext": true,
- "PushModuleContext": true,
"LazyCompile": true,
"LazyRecompile": true,
"ParallelRecompile": true,
@@ -196,13 +195,7 @@ var knownProblems = {
// Only applicable to strings.
"_HasCachedArrayIndex": true,
- "_GetCachedArrayIndex": true,
- "_OneByteSeqStringSetChar": true,
- "_TwoByteSeqStringSetChar": true,
-
- // Only for debugging parallel recompilation.
- "InstallRecompiledCode": true,
- "ForceParallelRecompile": true
+ "_GetCachedArrayIndex": true
};
var currentlyUncallable = {
diff --git a/deps/v8/test/mjsunit/harmony/collections.js b/deps/v8/test/mjsunit/harmony/collections.js
index 0219f3936..f3db7ea2b 100644
--- a/deps/v8/test/mjsunit/harmony/collections.js
+++ b/deps/v8/test/mjsunit/harmony/collections.js
@@ -313,60 +313,4 @@ TestBogusReceivers(bogusReceiversTestSet);
// Stress Test
// There is a proposed stress-test available at the es-discuss mailing list
// which cannot be reasonably automated. Check it out by hand if you like:
-// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
-
-
-// Set and Map size getters
-var setSizeDescriptor = Object.getOwnPropertyDescriptor(Set.prototype, 'size');
-assertEquals(undefined, setSizeDescriptor.value);
-assertEquals(undefined, setSizeDescriptor.set);
-assertTrue(setSizeDescriptor.get instanceof Function);
-assertEquals(undefined, setSizeDescriptor.get.prototype);
-assertFalse(setSizeDescriptor.enumerable);
-assertTrue(setSizeDescriptor.configurable);
-
-var s = new Set();
-assertFalse(s.hasOwnProperty('size'));
-for (var i = 0; i < 10; i++) {
- assertEquals(i, s.size);
- s.add(i);
-}
-for (var i = 9; i >= 0; i--) {
- s.delete(i);
- assertEquals(i, s.size);
-}
-
-
-var mapSizeDescriptor = Object.getOwnPropertyDescriptor(Map.prototype, 'size');
-assertEquals(undefined, mapSizeDescriptor.value);
-assertEquals(undefined, mapSizeDescriptor.set);
-assertTrue(mapSizeDescriptor.get instanceof Function);
-assertEquals(undefined, mapSizeDescriptor.get.prototype);
-assertFalse(mapSizeDescriptor.enumerable);
-assertTrue(mapSizeDescriptor.configurable);
-
-var m = new Map();
-assertFalse(m.hasOwnProperty('size'));
-for (var i = 0; i < 10; i++) {
- assertEquals(i, m.size);
- m.set(i, i);
-}
-for (var i = 9; i >= 0; i--) {
- m.delete(i);
- assertEquals(i, m.size);
-}
-
-// Test clear
-var s = new Set();
-s.add(42);
-assertTrue(s.has(42));
-s.clear();
-assertFalse(s.has(42));
-assertEquals(0, s.size);
-
-var m = new Map();
-m.set(42, true);
-assertTrue(m.has(42));
-m.clear();
-assertFalse(m.has(42));
-assertEquals(0, m.size);
+// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
\ No newline at end of file
diff --git a/deps/v8/test/mjsunit/harmony/module-linking.js b/deps/v8/test/mjsunit/harmony/module-linking.js
index 3c0f18c37..a4b272f46 100644
--- a/deps/v8/test/mjsunit/harmony/module-linking.js
+++ b/deps/v8/test/mjsunit/harmony/module-linking.js
@@ -112,7 +112,7 @@ module R {
assertThrows(function() { eval("c = -1") }, SyntaxError)
assertThrows(function() { R.c = -2 }, TypeError)
- // Initialize first bunch of variables.
+ // Initialize first bunch of variables.
export var v = 1
export let l = 2
export const c = 3
diff --git a/deps/v8/test/mjsunit/harmony/object-observe.js b/deps/v8/test/mjsunit/harmony/object-observe.js
deleted file mode 100644
index 04dfb967b..000000000
--- a/deps/v8/test/mjsunit/harmony/object-observe.js
+++ /dev/null
@@ -1,873 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-observation --harmony-proxies --harmony-collections
-
-var allObservers = [];
-function reset() {
- allObservers.forEach(function(observer) { observer.reset(); });
-}
-
-function stringifyNoThrow(arg) {
- try {
- return JSON.stringify(arg);
- } catch (e) {
- return '{<circular reference>}';
- }
-}
-
-function createObserver() {
- "use strict"; // So that |this| in callback can be undefined.
-
- var observer = {
- records: undefined,
- callbackCount: 0,
- reset: function() {
- this.records = undefined;
- this.callbackCount = 0;
- },
- assertNotCalled: function() {
- assertEquals(undefined, this.records);
- assertEquals(0, this.callbackCount);
- },
- assertCalled: function() {
- assertEquals(1, this.callbackCount);
- },
- assertRecordCount: function(count) {
- this.assertCalled();
- assertEquals(count, this.records.length);
- },
- assertCallbackRecords: function(recs) {
- this.assertRecordCount(recs.length);
- for (var i = 0; i < recs.length; i++) {
- if ('name' in recs[i])
- recs[i].name = String(recs[i].name);
- print(i, stringifyNoThrow(this.records[i]), stringifyNoThrow(recs[i]));
- assertSame(this.records[i].object, recs[i].object);
- assertEquals('string', typeof recs[i].type);
- assertPropertiesEqual(this.records[i], recs[i]);
- }
- }
- };
-
- observer.callback = function(r) {
- assertEquals(undefined, this);
- assertEquals('object', typeof r);
- assertTrue(r instanceof Array)
- observer.records = r;
- observer.callbackCount++;
- };
-
- observer.reset();
- allObservers.push(observer);
- return observer;
-}
-
-var observer = createObserver();
-assertEquals("function", typeof observer.callback);
-var obj = {};
-
-function frozenFunction() {}
-Object.freeze(frozenFunction);
-var nonFunction = {};
-var changeRecordWithAccessor = { type: 'foo' };
-var recordCreated = false;
-Object.defineProperty(changeRecordWithAccessor, 'name', {
- get: function() {
- recordCreated = true;
- return "bar";
- },
- enumerable: true
-})
-
-// Object.observe
-assertThrows(function() { Object.observe("non-object", observer.callback); }, TypeError);
-assertThrows(function() { Object.observe(obj, nonFunction); }, TypeError);
-assertThrows(function() { Object.observe(obj, frozenFunction); }, TypeError);
-assertEquals(obj, Object.observe(obj, observer.callback));
-
-// Object.unobserve
-assertThrows(function() { Object.unobserve(4, observer.callback); }, TypeError);
-assertThrows(function() { Object.unobserve(obj, nonFunction); }, TypeError);
-assertEquals(obj, Object.unobserve(obj, observer.callback));
-
-// Object.getNotifier
-var notifier = Object.getNotifier(obj);
-assertSame(notifier, Object.getNotifier(obj));
-assertEquals(null, Object.getNotifier(Object.freeze({})));
-assertFalse(notifier.hasOwnProperty('notify'));
-assertEquals([], Object.keys(notifier));
-var notifyDesc = Object.getOwnPropertyDescriptor(notifier.__proto__, 'notify');
-assertTrue(notifyDesc.configurable);
-assertTrue(notifyDesc.writable);
-assertFalse(notifyDesc.enumerable);
-assertThrows(function() { notifier.notify({}); }, TypeError);
-assertThrows(function() { notifier.notify({ type: 4 }); }, TypeError);
-var notify = notifier.notify;
-assertThrows(function() { notify.call(undefined, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(null, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(5, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call('hello', { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call(false, { type: 'a' }); }, TypeError);
-assertThrows(function() { notify.call({}, { type: 'a' }); }, TypeError);
-assertFalse(recordCreated);
-notifier.notify(changeRecordWithAccessor);
-assertFalse(recordCreated); // not observed yet
-
-// Object.deliverChangeRecords
-assertThrows(function() { Object.deliverChangeRecords(nonFunction); }, TypeError);
-
-Object.observe(obj, observer.callback);
-
-// notify uses [[CreateOwnProperty]] to create the changeRecord.
-reset();
-var protoExpandoAccessed = false;
-Object.defineProperty(Object.prototype, 'protoExpando',
- {
- configurable: true,
- set: function() { protoExpandoAccessed = true; }
- }
-);
-notifier.notify({ type: 'foo', protoExpando: 'val'});
-assertFalse(protoExpandoAccessed);
-delete Object.prototype.protoExpando;
-Object.deliverChangeRecords(observer.callback);
-
-// Multiple records are delivered.
-reset();
-notifier.notify({
- type: 'updated',
- name: 'foo',
- expando: 1
-});
-
-notifier.notify({
- object: notifier, // object property is ignored
- type: 'deleted',
- name: 'bar',
- expando2: 'str'
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: 'foo', type: 'updated', expando: 1 },
- { object: obj, name: 'bar', type: 'deleted', expando2: 'str' }
-]);
-
-// No delivery takes place if no records are pending
-reset();
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Multiple observation has no effect.
-reset();
-Object.observe(obj, observer.callback);
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCalled();
-
-// Observation can be stopped.
-reset();
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Multiple unobservation has no effect
-reset();
-Object.unobserve(obj, observer.callback);
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
-});
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
-// Re-observation works and only includes changeRecords issued after the call.
-reset();
-Object.getNotifier(obj).notify({
- type: 'foo',
-});
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
-});
-records = undefined;
-Object.deliverChangeRecords(observer.callback);
-observer.assertRecordCount(1);
-
-// Observing a continuous stream of changes while intermittently unobserving.
-reset();
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
- val: 1
-});
-
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
- val: 2
-});
-
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
- val: 3
-});
-
-Object.unobserve(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
- val: 4
-});
-
-Object.observe(obj, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo',
- val: 5
-});
-
-Object.unobserve(obj, observer.callback);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'foo', val: 1 },
- { object: obj, type: 'foo', val: 3 },
- { object: obj, type: 'foo', val: 5 }
-]);
-
-// Observing multiple objects; records appear in order.
-reset();
-var obj2 = {};
-var obj3 = {}
-Object.observe(obj, observer.callback);
-Object.observe(obj3, observer.callback);
-Object.observe(obj2, observer.callback);
-Object.getNotifier(obj).notify({
- type: 'foo1',
-});
-Object.getNotifier(obj2).notify({
- type: 'foo2',
-});
-Object.getNotifier(obj3).notify({
- type: 'foo3',
-});
-Object.observe(obj3, observer.callback);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, type: 'foo1' },
- { object: obj2, type: 'foo2' },
- { object: obj3, type: 'foo3' }
-]);
-
-// Observing named properties.
-reset();
-var obj = {a: 1}
-Object.observe(obj, observer.callback);
-obj.a = 2;
-obj["a"] = 3;
-delete obj.a;
-obj.a = 4;
-obj.a = 4; // ignored
-obj.a = 5;
-Object.defineProperty(obj, "a", {value: 6});
-Object.defineProperty(obj, "a", {writable: false});
-obj.a = 7; // ignored
-Object.defineProperty(obj, "a", {value: 8});
-Object.defineProperty(obj, "a", {value: 7, writable: true});
-Object.defineProperty(obj, "a", {get: function() {}});
-Object.defineProperty(obj, "a", {get: frozenFunction});
-Object.defineProperty(obj, "a", {get: frozenFunction}); // ignored
-Object.defineProperty(obj, "a", {get: frozenFunction, set: frozenFunction});
-Object.defineProperty(obj, "a", {set: frozenFunction}); // ignored
-Object.defineProperty(obj, "a", {get: undefined, set: frozenFunction});
-delete obj.a;
-delete obj.a;
-Object.defineProperty(obj, "a", {get: function() {}, configurable: true});
-Object.defineProperty(obj, "a", {value: 9, writable: true});
-obj.a = 10;
-delete obj.a;
-Object.defineProperty(obj, "a", {value: 11, configurable: true});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "a", type: "updated", oldValue: 1 },
- { object: obj, name: "a", type: "updated", oldValue: 2 },
- { object: obj, name: "a", type: "deleted", oldValue: 3 },
- { object: obj, name: "a", type: "new" },
- { object: obj, name: "a", type: "updated", oldValue: 4 },
- { object: obj, name: "a", type: "updated", oldValue: 5 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 6 },
- { object: obj, name: "a", type: "updated", oldValue: 6 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 8 },
- { object: obj, name: "a", type: "reconfigured", oldValue: 7 },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "deleted" },
- { object: obj, name: "a", type: "new" },
- { object: obj, name: "a", type: "reconfigured" },
- { object: obj, name: "a", type: "updated", oldValue: 9 },
- { object: obj, name: "a", type: "deleted", oldValue: 10 },
- { object: obj, name: "a", type: "new" },
-]);
-
-// Observing indexed properties.
-reset();
-var obj = {'1': 1}
-Object.observe(obj, observer.callback);
-obj[1] = 2;
-obj[1] = 3;
-delete obj[1];
-obj[1] = 4;
-obj[1] = 4; // ignored
-obj[1] = 5;
-Object.defineProperty(obj, "1", {value: 6});
-Object.defineProperty(obj, "1", {writable: false});
-obj[1] = 7; // ignored
-Object.defineProperty(obj, "1", {value: 8});
-Object.defineProperty(obj, "1", {value: 7, writable: true});
-Object.defineProperty(obj, "1", {get: function() {}});
-Object.defineProperty(obj, "1", {get: frozenFunction});
-Object.defineProperty(obj, "1", {get: frozenFunction}); // ignored
-Object.defineProperty(obj, "1", {get: frozenFunction, set: frozenFunction});
-Object.defineProperty(obj, "1", {set: frozenFunction}); // ignored
-Object.defineProperty(obj, "1", {get: undefined, set: frozenFunction});
-delete obj[1];
-delete obj[1];
-Object.defineProperty(obj, "1", {get: function() {}, configurable: true});
-Object.defineProperty(obj, "1", {value: 9, writable: true});
-obj[1] = 10;
-delete obj[1];
-Object.defineProperty(obj, "1", {value: 11, configurable: true});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "1", type: "updated", oldValue: 1 },
- { object: obj, name: "1", type: "updated", oldValue: 2 },
- { object: obj, name: "1", type: "deleted", oldValue: 3 },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "1", type: "updated", oldValue: 4 },
- { object: obj, name: "1", type: "updated", oldValue: 5 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 6 },
- { object: obj, name: "1", type: "updated", oldValue: 6 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 8 },
- { object: obj, name: "1", type: "reconfigured", oldValue: 7 },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "deleted" },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "1", type: "reconfigured" },
- { object: obj, name: "1", type: "updated", oldValue: 9 },
- { object: obj, name: "1", type: "deleted", oldValue: 10 },
- { object: obj, name: "1", type: "new" },
-]);
-
-
-// Test all kinds of objects generically.
-function TestObserveConfigurable(obj, prop) {
- reset();
- obj[prop] = 1;
- Object.observe(obj, observer.callback);
- obj[prop] = 2;
- obj[prop] = 3;
- delete obj[prop];
- obj[prop] = 4;
- obj[prop] = 4; // ignored
- obj[prop] = 5;
- Object.defineProperty(obj, prop, {value: 6});
- Object.defineProperty(obj, prop, {writable: false});
- obj[prop] = 7; // ignored
- Object.defineProperty(obj, prop, {value: 8});
- Object.defineProperty(obj, prop, {value: 7, writable: true});
- Object.defineProperty(obj, prop, {get: function() {}});
- Object.defineProperty(obj, prop, {get: frozenFunction});
- Object.defineProperty(obj, prop, {get: frozenFunction}); // ignored
- Object.defineProperty(obj, prop, {get: frozenFunction, set: frozenFunction});
- Object.defineProperty(obj, prop, {set: frozenFunction}); // ignored
- Object.defineProperty(obj, prop, {get: undefined, set: frozenFunction});
- obj.__defineSetter__(prop, frozenFunction); // ignored
- obj.__defineSetter__(prop, function() {});
- obj.__defineGetter__(prop, function() {});
- delete obj[prop];
- delete obj[prop]; // ignored
- obj.__defineGetter__(prop, function() {});
- delete obj[prop];
- Object.defineProperty(obj, prop, {get: function() {}, configurable: true});
- Object.defineProperty(obj, prop, {value: 9, writable: true});
- obj[prop] = 10;
- delete obj[prop];
- Object.defineProperty(obj, prop, {value: 11, configurable: true});
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: obj, name: prop, type: "updated", oldValue: 1 },
- { object: obj, name: prop, type: "updated", oldValue: 2 },
- { object: obj, name: prop, type: "deleted", oldValue: 3 },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "updated", oldValue: 4 },
- { object: obj, name: prop, type: "updated", oldValue: 5 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 6 },
- { object: obj, name: prop, type: "updated", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 8 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "deleted" },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "deleted" },
- { object: obj, name: prop, type: "new" },
- { object: obj, name: prop, type: "reconfigured" },
- { object: obj, name: prop, type: "updated", oldValue: 9 },
- { object: obj, name: prop, type: "deleted", oldValue: 10 },
- { object: obj, name: prop, type: "new" },
- ]);
- Object.unobserve(obj, observer.callback);
- delete obj[prop];
-}
-
-function TestObserveNonConfigurable(obj, prop, desc) {
- reset();
- obj[prop] = 1;
- Object.observe(obj, observer.callback);
- obj[prop] = 4;
- obj[prop] = 4; // ignored
- obj[prop] = 5;
- Object.defineProperty(obj, prop, {value: 6});
- Object.defineProperty(obj, prop, {value: 6}); // ignored
- Object.defineProperty(obj, prop, {value: 7});
- Object.defineProperty(obj, prop,
- {enumerable: desc.enumerable}); // ignored
- Object.defineProperty(obj, prop, {writable: false});
- obj[prop] = 7; // ignored
- Object.deliverChangeRecords(observer.callback);
- observer.assertCallbackRecords([
- { object: obj, name: prop, type: "updated", oldValue: 1 },
- { object: obj, name: prop, type: "updated", oldValue: 4 },
- { object: obj, name: prop, type: "updated", oldValue: 5 },
- { object: obj, name: prop, type: "updated", oldValue: 6 },
- { object: obj, name: prop, type: "reconfigured", oldValue: 7 },
- ]);
- Object.unobserve(obj, observer.callback);
-}
-
-function createProxy(create, x) {
- var handler = {
- getPropertyDescriptor: function(k) {
- for (var o = this.target; o; o = Object.getPrototypeOf(o)) {
- var desc = Object.getOwnPropertyDescriptor(o, k);
- if (desc) return desc;
- }
- return undefined;
- },
- getOwnPropertyDescriptor: function(k) {
- return Object.getOwnPropertyDescriptor(this.target, k);
- },
- defineProperty: function(k, desc) {
- var x = Object.defineProperty(this.target, k, desc);
- Object.deliverChangeRecords(this.callback);
- return x;
- },
- delete: function(k) {
- var x = delete this.target[k];
- Object.deliverChangeRecords(this.callback);
- return x;
- },
- getPropertyNames: function() {
- return Object.getOwnPropertyNames(this.target);
- },
- target: {isProxy: true},
- callback: function(changeRecords) {
- print("callback", stringifyNoThrow(handler.proxy), stringifyNoThrow(got));
- for (var i in changeRecords) {
- var got = changeRecords[i];
- var change = {object: handler.proxy, name: got.name, type: got.type};
- if ("oldValue" in got) change.oldValue = got.oldValue;
- Object.getNotifier(handler.proxy).notify(change);
- }
- },
- };
- Object.observe(handler.target, handler.callback);
- return handler.proxy = create(handler, x);
-}
-
-var objects = [
- {},
- [],
- this, // global object
- function(){},
- (function(){ return arguments })(),
- (function(){ "use strict"; return arguments })(),
- Object(1), Object(true), Object("bla"),
- new Date(),
- Object, Function, Date, RegExp,
- new Set, new Map, new WeakMap,
- new ArrayBuffer(10), new Int32Array(5),
- createProxy(Proxy.create, null),
- createProxy(Proxy.createFunction, function(){}),
-];
-var properties = ["a", "1", 1, "length", "prototype"];
-
-// Cases that yield non-standard results.
-// TODO(observe): ...or don't work yet.
-function blacklisted(obj, prop) {
- return (obj instanceof Int32Array && prop == 1) ||
- (obj instanceof Int32Array && prop === "length") ||
- (obj instanceof ArrayBuffer && prop == 1) ||
- // TODO(observe): oldValue when reconfiguring array length
- (obj instanceof Array && prop === "length")
-}
-
-for (var i in objects) for (var j in properties) {
- var obj = objects[i];
- var prop = properties[j];
- if (blacklisted(obj, prop)) continue;
- var desc = Object.getOwnPropertyDescriptor(obj, prop);
- print("***", typeof obj, stringifyNoThrow(obj), prop);
- if (!desc || desc.configurable)
- TestObserveConfigurable(obj, prop);
- else if (desc.writable)
- TestObserveNonConfigurable(obj, prop, desc);
-}
-
-
-// Observing array length (including truncation)
-reset();
-var arr = ['a', 'b', 'c', 'd'];
-var arr2 = ['alpha', 'beta'];
-var arr3 = ['hello'];
-arr3[2] = 'goodbye';
-arr3.length = 6;
-// TODO(adamk): Enable this test case when it can run in a reasonable
-// amount of time.
-//var slow_arr = new Array(1000000000);
-//slow_arr[500000000] = 'hello';
-Object.defineProperty(arr, '0', {configurable: false});
-Object.defineProperty(arr, '2', {get: function(){}});
-Object.defineProperty(arr2, '0', {get: function(){}, configurable: false});
-Object.observe(arr, observer.callback);
-Object.observe(arr2, observer.callback);
-Object.observe(arr3, observer.callback);
-arr.length = 2;
-arr.length = 0;
-arr.length = 10;
-arr2.length = 0;
-arr2.length = 1; // no change expected
-arr3.length = 0;
-Object.defineProperty(arr3, 'length', {value: 5});
-Object.defineProperty(arr3, 'length', {value: 10, writable: false});
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'deleted', oldValue: 'd' },
- { object: arr, name: '2', type: 'deleted' },
- { object: arr, name: 'length', type: 'updated', oldValue: 4 },
- { object: arr, name: '1', type: 'deleted', oldValue: 'b' },
- { object: arr, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr, name: 'length', type: 'updated', oldValue: 1 },
- { object: arr2, name: '1', type: 'deleted', oldValue: 'beta' },
- { object: arr2, name: 'length', type: 'updated', oldValue: 2 },
- { object: arr3, name: '2', type: 'deleted', oldValue: 'goodbye' },
- { object: arr3, name: '0', type: 'deleted', oldValue: 'hello' },
- { object: arr3, name: 'length', type: 'updated', oldValue: 6 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 0 },
- { object: arr3, name: 'length', type: 'updated', oldValue: 5 },
- // TODO(adamk): This record should be merged with the above
- { object: arr3, name: 'length', type: 'reconfigured' },
-]);
-
-// Assignments in loops (checking different IC states).
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-for (var i = 0; i < 5; i++) {
- obj["a" + i] = i;
-}
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "a0", type: "new" },
- { object: obj, name: "a1", type: "new" },
- { object: obj, name: "a2", type: "new" },
- { object: obj, name: "a3", type: "new" },
- { object: obj, name: "a4", type: "new" },
-]);
-
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-for (var i = 0; i < 5; i++) {
- obj[i] = i;
-}
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: "0", type: "new" },
- { object: obj, name: "1", type: "new" },
- { object: obj, name: "2", type: "new" },
- { object: obj, name: "3", type: "new" },
- { object: obj, name: "4", type: "new" },
-]);
-
-// Adding elements past the end of an array should notify on length
-reset();
-var arr = [1, 2, 3];
-Object.observe(arr, observer.callback);
-arr[3] = 10;
-arr[100] = 20;
-Object.defineProperty(arr, '200', {value: 7});
-Object.defineProperty(arr, '400', {get: function(){}});
-arr[50] = 30; // no length change expected
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: arr, name: '3', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 3 },
- { object: arr, name: '100', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 4 },
- { object: arr, name: '200', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 101 },
- { object: arr, name: '400', type: 'new' },
- { object: arr, name: 'length', type: 'updated', oldValue: 201 },
- { object: arr, name: '50', type: 'new' },
-]);
-
-// Tests for array methods, first on arrays and then on plain objects
-//
-// === ARRAYS ===
-//
-// Push
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.push(3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '2', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
-]);
-
-// Pop
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.pop();
-array.pop();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 1 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
-]);
-
-// Shift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.shift();
-array.shift();
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
-]);
-
-// Unshift
-reset();
-var array = [1, 2];
-Object.observe(array, observer.callback);
-array.unshift(3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'new' },
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
-]);
-
-// Splice
-reset();
-var array = [1, 2, 3];
-Object.observe(array, observer.callback);
-array.splice(1, 1, 4, 5);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 3 },
-]);
-
-//
-// === PLAIN OBJECTS ===
-//
-// Push
-reset()
-var array = {0: 1, 1: 2, length: 2}
-Object.observe(array, observer.callback);
-Array.prototype.push.call(array, 3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '2', type: 'new' },
- { object: array, name: '3', type: 'new' },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
-]);
-
-// Pop
-reset()
-var array = {0: 1, 1: 2, length: 2};
-Object.observe(array, observer.callback);
-Array.prototype.pop.call(array);
-Array.prototype.pop.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 1 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
-]);
-
-// Shift
-reset()
-var array = {0: 1, 1: 2, length: 2};
-Object.observe(array, observer.callback);
-Array.prototype.shift.call(array);
-Array.prototype.shift.call(array);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
- { object: array, name: '0', type: 'deleted', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 1 },
-]);
-
-// Unshift
-reset()
-var array = {0: 1, 1: 2, length: 2};
-Object.observe(array, observer.callback);
-Array.prototype.unshift.call(array, 3, 4);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: '2', type: 'new' },
- { object: array, name: '0', type: 'updated', oldValue: 1 },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: 'length', type: 'updated', oldValue: 2 },
-]);
-
-// Splice
-reset()
-var array = {0: 1, 1: 2, 2: 3, length: 3};
-Object.observe(array, observer.callback);
-Array.prototype.splice.call(array, 1, 1, 4, 5);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '3', type: 'new' },
- { object: array, name: '1', type: 'updated', oldValue: 2 },
- { object: array, name: '2', type: 'updated', oldValue: 3 },
- { object: array, name: 'length', type: 'updated', oldValue: 3 },
-]);
-
-// Exercise StoreIC_ArrayLength
-reset();
-var dummy = {};
-Object.observe(dummy, observer.callback);
-Object.unobserve(dummy, observer.callback);
-var array = [0];
-Object.observe(array, observer.callback);
-array.splice(0, 1);
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: array, name: '0', type: 'deleted', oldValue: 0 },
- { object: array, name: 'length', type: 'updated', oldValue: 1},
-]);
-
-
-// __proto__
-reset();
-var obj = {};
-Object.observe(obj, observer.callback);
-var p = {foo: 'yes'};
-var q = {bar: 'no'};
-obj.__proto__ = p;
-obj.__proto__ = p; // ignored
-obj.__proto__ = null;
-obj.__proto__ = q;
-// TODO(adamk): Add tests for objects with hidden prototypes
-// once we support observing the global object.
-Object.deliverChangeRecords(observer.callback);
-observer.assertCallbackRecords([
- { object: obj, name: '__proto__', type: 'prototype',
- oldValue: Object.prototype },
- { object: obj, name: '__proto__', type: 'prototype', oldValue: p },
- { object: obj, name: '__proto__', type: 'prototype', oldValue: null },
-]);
-
-// Function.prototype
-reset();
-var fun = function(){};
-Object.observe(fun, observer.callback);
-var myproto = {foo: 'bar'};
-fun.prototype = myproto;
-fun.prototype = 7;
-fun.prototype = 7; // ignored
-Object.defineProperty(fun, 'prototype', {value: 8});
-Object.deliverChangeRecords(observer.callback);
-observer.assertRecordCount(3);
-// Manually examine the first record in order to test
-// lazy creation of oldValue
-assertSame(fun, observer.records[0].object);
-assertEquals('prototype', observer.records[0].name);
-assertEquals('updated', observer.records[0].type);
-// The only existing reference to the oldValue object is in this
-// record, so to test that lazy creation happened correctly
-// we compare its constructor to our function (one of the invariants
-// ensured when creating an object via AllocateFunctionPrototype).
-assertSame(fun, observer.records[0].oldValue.constructor);
-observer.records.splice(0, 1);
-observer.assertCallbackRecords([
- { object: fun, name: 'prototype', type: 'updated', oldValue: myproto },
- { object: fun, name: 'prototype', type: 'updated', oldValue: 7 },
-]);
-
-// Function.prototype should not be observable except on the object itself
-reset();
-var fun = function(){};
-var obj = { __proto__: fun };
-Object.observe(obj, observer.callback);
-obj.prototype = 7;
-Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
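The file deleted above exercised V8's experimental Object.observe API (part of the Harmony object-observation work of this era; not standard JavaScript). A minimal sketch of the contract those assertions relied on, assuming a V8 build with the API enabled:

    // Sketch only: requires a V8 of this era with Object.observe enabled.
    var records;
    function callback(recs) { records = recs; }

    var o = {};
    Object.observe(o, callback);
    o.x = 1;                                // queues { type: "new", name: "x" }
    o.x = 2;                                // queues { type: "updated", oldValue: 1 }
    delete o.x;                             // queues { type: "deleted", oldValue: 2 }
    Object.deliverChangeRecords(callback);  // flushes pending records synchronously
    // records now holds the three change records, in mutation order.
    Object.unobserve(o, callback);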
diff --git a/deps/v8/test/mjsunit/harmony/proxies-json.js b/deps/v8/test/mjsunit/harmony/proxies-json.js
deleted file mode 100644
index 539c5a84c..000000000
--- a/deps/v8/test/mjsunit/harmony/proxies-json.js
+++ /dev/null
@@ -1,178 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony
-
-function testStringify(expected, object) {
- // Test fast case that bails out to slow case.
- assertEquals(expected, JSON.stringify(object));
- // Test slow case.
- assertEquals(expected, JSON.stringify(object, undefined, 0));
-}
-
-// Test serializing a proxy, function proxy and objects that contain them.
-var handler1 = {
- get: function(target, name) {
- return name.toUpperCase();
- },
- enumerate: function(target) {
- return ['a', 'b', 'c'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy1 = Proxy.create(handler1);
-testStringify('{"a":"A","b":"B","c":"C"}', proxy1);
-
-var proxy_fun = Proxy.createFunction(handler1, function() { return 1; });
-testStringify(undefined, proxy_fun);
-testStringify('[1,null]', [1, proxy_fun]);
-
-var parent1a = { b: proxy1 };
-testStringify('{"b":{"a":"A","b":"B","c":"C"}}', parent1a);
-
-var parent1b = { a: 123, b: proxy1, c: true };
-testStringify('{"a":123,"b":{"a":"A","b":"B","c":"C"},"c":true}', parent1b);
-
-var parent1c = [123, proxy1, true];
-testStringify('[123,{"a":"A","b":"B","c":"C"},true]', parent1c);
-
-// Proxy with side effect.
-var handler2 = {
- get: function(target, name) {
- delete parent2.c;
- return name.toUpperCase();
- },
- enumerate: function(target) {
- return ['a', 'b', 'c'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy2 = Proxy.create(handler2);
-var parent2 = { a: "delete", b: proxy2, c: "remove" };
-var expected2 = '{"a":"delete","b":{"a":"A","b":"B","c":"C"}}';
-assertEquals(expected2, JSON.stringify(parent2));
-parent2.c = "remove"; // Revert side effect.
-assertEquals(expected2, JSON.stringify(parent2, undefined, 0));
-
-// Proxy with a get function that uses the first argument.
-var handler3 = {
- get: function(target, name) {
- if (name == 'valueOf') return function() { return "proxy" };
- return name + "(" + target + ")";
- },
- enumerate: function(target) {
- return ['a', 'b', 'c'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy3 = Proxy.create(handler3);
-var parent3 = { x: 123, y: proxy3 }
-testStringify('{"x":123,"y":{"a":"a(proxy)","b":"b(proxy)","c":"c(proxy)"}}',
- parent3);
-
-// Empty proxy.
-var handler4 = {
- get: function(target, name) {
- return 0;
- },
- enumerate: function(target) {
- return [];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: false };
- }
-}
-
-var proxy4 = Proxy.create(handler4);
-testStringify('{}', proxy4);
-testStringify('{"a":{}}', { a: proxy4 });
-
-// Proxy that provides a toJSON function that uses this.
-var handler5 = {
- get: function(target, name) {
- if (name == 'z') return 97000;
- return function(key) { return key.charCodeAt(0) + this.z; };
- },
- enumerate: function(target) {
- return ['toJSON', 'z'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy5 = Proxy.create(handler5);
-testStringify('{"a":97097}', { a: proxy5 });
-
-// Proxy that provides a toJSON function that returns undefined.
-var handler6 = {
- get: function(target, name) {
- return function(key) { return undefined; };
- },
- enumerate: function(target) {
- return ['toJSON'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy6 = Proxy.create(handler6);
-testStringify('[1,null,true]', [1, proxy6, true]);
-testStringify('{"a":1,"c":true}', {a: 1, b: proxy6, c: true});
-
-// Object containing a proxy that changes the parent's properties.
-var handler7 = {
- get: function(target, name) {
- delete parent7.a;
- delete parent7.c;
- parent7.e = "5";
- return name.toUpperCase();
- },
- enumerate: function(target) {
- return ['a', 'b', 'c'];
- },
- getOwnPropertyDescriptor: function(target, name) {
- return { enumerable: true };
- }
-}
-
-var proxy7 = Proxy.create(handler7);
-var parent7 = { a: "1", b: proxy7, c: "3", d: "4" };
-assertEquals('{"a":"1","b":{"a":"A","b":"B","c":"C"},"d":"4"}',
- JSON.stringify(parent7));
-assertEquals('{"b":{"a":"A","b":"B","c":"C"},"d":"4","e":"5"}',
- JSON.stringify(parent7));
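The suite deleted above drove JSON.stringify through the old Harmony proxy API (Proxy.create / Proxy.createFunction, which this V8 still ships). The core behaviour it pinned down is that the serializer enumerates keys through the handler and reads each value through its get trap; a condensed sketch under those era-specific semantics:

    // Sketch only: Proxy.create is the pre-ES6 Harmony proxy API.
    var handler = {
      get: function(target, name) { return name.toUpperCase(); },
      enumerate: function(target) { return ['a', 'b']; },
      getOwnPropertyDescriptor: function(target, name) {
        return { enumerable: true };
      }
    };
    JSON.stringify(Proxy.create(handler));  // '{"a":"A","b":"B"}'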
diff --git a/deps/v8/test/mjsunit/harmony/proxies.js b/deps/v8/test/mjsunit/harmony/proxies.js
index 04fc76949..7170ffd9c 100644
--- a/deps/v8/test/mjsunit/harmony/proxies.js
+++ b/deps/v8/test/mjsunit/harmony/proxies.js
@@ -649,11 +649,6 @@ function TestSetForDerived2(create, trap) {
TestSetForDerived(
function(k) {
- // TODO(yangguo): issue 2398 - throwing an error causes formatting of
- // the message string, which can be observable through this handler.
- // We ignore keys that occur when formatting the message string.
- if (k == "toString" || k == "valueOf") return;
-
key = k;
switch (k) {
case "p_writable": return {writable: true, configurable: true}
diff --git a/deps/v8/test/mjsunit/json-parser-recursive.js b/deps/v8/test/mjsunit/json-parser-recursive.js
deleted file mode 100644
index 1e00c83c8..000000000
--- a/deps/v8/test/mjsunit/json-parser-recursive.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var str = "[1]";
-for (var i = 0; i < 100000; i++) {
- str = "[1," + str + "]";
-}
-
-assertThrows(function() { JSON.parse(str); }, RangeError);
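The deleted test pinned down that deeply nested input makes the JSON parser throw a RangeError rather than crash the process. A usage sketch for callers that must survive hostile input, assuming that same RangeError contract (parseOrNull is a hypothetical helper, not part of the test suite):

    // Sketch: tolerate over-deep input; RangeError is assumed to signal
    // parser stack exhaustion, as the deleted test asserted.
    function parseOrNull(text) {
      try {
        return JSON.parse(text);
      } catch (e) {
        if (e instanceof RangeError) return null;  // nesting too deep
        throw e;                                   // real syntax errors, etc.
      }
    }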
diff --git a/deps/v8/test/mjsunit/json-stringify-recursive.js b/deps/v8/test/mjsunit/json-stringify-recursive.js
deleted file mode 100644
index 31aa0027c..000000000
--- a/deps/v8/test/mjsunit/json-stringify-recursive.js
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var a = {};
-for (i = 0; i < 10000; i++) {
- var current = {};
- current.a = a;
- a = current;
-}
-
-function rec(a,b,c,d,e,f,g,h,i,j,k,l,m,n) {
- JSON.stringify(a);
- rec(a,b,c,d,e,f,g,h,i,j,k,l,m,n);
-}
-
-assertThrows(function() { rec(1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4) },
- RangeError);
-
-
-var depth = 10000;
-var deepArray = [];
-for (var i = 0; i < depth; i++) deepArray = [deepArray];
-assertThrows(function() { JSON.stringify(deepArray); }, RangeError);
-
-
-var deepObject = {};
-for (var i = 0; i < depth; i++) deepObject = { next: deepObject };
-assertThrows(function() { JSON.stringify(deepObject); }, RangeError);
diff --git a/deps/v8/test/mjsunit/json.js b/deps/v8/test/mjsunit/json.js
index 6e91725c7..54fa1854f 100644
--- a/deps/v8/test/mjsunit/json.js
+++ b/deps/v8/test/mjsunit/json.js
@@ -257,42 +257,6 @@ assertEquals("[1,2,[3,[4],5],6,7]",
assertEquals("[2,4,[6,[8],10],12,14]",
JSON.stringify([1, 2, [3, [4], 5], 6, 7], DoubleNumbers));
assertEquals('["a","ab","abc"]', JSON.stringify(["a","ab","abc"]));
-assertEquals('{"a":1,"c":true}',
- JSON.stringify({ a : 1,
- b : function() { 1 },
- c : true,
- d : function() { 2 } }));
-assertEquals('[1,null,true,null]',
- JSON.stringify([1, function() { 1 }, true, function() { 2 }]));
-assertEquals('"toJSON 123"',
- JSON.stringify({ toJSON : function() { return 'toJSON 123'; } }));
-assertEquals('{"a":321}',
- JSON.stringify({ a : { toJSON : function() { return 321; } } }));
-var counter = 0;
-assertEquals('{"getter":123}',
- JSON.stringify({ get getter() { counter++; return 123; } }));
-assertEquals(1, counter);
-assertEquals('{"a":"abc","b":"\u1234bc"}',
- JSON.stringify({ a : "abc", b : "\u1234bc" }));
-
-
-var a = { a : 1, b : 2 };
-delete a.a;
-assertEquals('{"b":2}', JSON.stringify(a));
-
-var b = {};
-b.__proto__ = { toJSON : function() { return 321;} };
-assertEquals("321", JSON.stringify(b));
-
-var array = [""];
-var expected = '""';
-for (var i = 0; i < 10000; i++) {
- array.push("");
- expected = '"",' + expected;
-}
-expected = '[' + expected + ']';
-assertEquals(expected, JSON.stringify(array));
-
var circular = [1, 2, 3];
circular[2] = circular;
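Among the assertions dropped from json.js above is the pair of rules for unserializable values: a function-valued property of an object is omitted outright, while in an array the slot becomes null so later indices keep their positions. A condensed sketch of just that behaviour:

    // Sketch of the omission rules the deleted assertions covered.
    var f = function() {};
    JSON.stringify({ a: 1, b: f });  // '{"a":1}'       - property dropped
    JSON.stringify([1, f, true]);    // '[1,null,true]' - slot becomes null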
diff --git a/deps/v8/test/mjsunit/json2.js b/deps/v8/test/mjsunit/json2.js
deleted file mode 100644
index 4c0b8f58c..000000000
--- a/deps/v8/test/mjsunit/json2.js
+++ /dev/null
@@ -1,153 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// Test JSON.stringify on the global object.
-var a = 12345;
-assertTrue(JSON.stringify(this).indexOf('"a":12345') > 0);
-
-// Test JSON.stringify of array in dictionary mode.
-var array_1 = [];
-var array_2 = [];
-array_1[100000] = 1;
-array_2[100000] = function() { return 1; };
-var nulls = "";
-for (var i = 0; i < 100000; i++) {
- nulls += 'null,';
-}
-expected_1 = '[' + nulls + '1]';
-expected_2 = '[' + nulls + 'null]';
-assertEquals(expected_1, JSON.stringify(array_1));
-assertEquals(expected_2, JSON.stringify(array_2));
-
-// Test JSValue with custom prototype.
-var num_wrapper = Object(42);
-num_wrapper.__proto__ = { __proto__: null,
- toString: function() { return true; } };
-assertEquals('1', JSON.stringify(num_wrapper));
-
-var str_wrapper = Object('2');
-str_wrapper.__proto__ = { __proto__: null,
- toString: function() { return true; } };
-assertEquals('"true"', JSON.stringify(str_wrapper));
-
-var bool_wrapper = Object(false);
-bool_wrapper.__proto__ = { __proto__: null,
- toString: function() { return true; } };
-// Note that toString function is not evaluated here!
-assertEquals('false', JSON.stringify(bool_wrapper));
-
-// Test getters.
-var counter = 0;
-var getter_obj = { get getter() {
- counter++;
- return 123;
- } };
-assertEquals('{"getter":123}', JSON.stringify(getter_obj));
-assertEquals(1, counter);
-
-// Test toJSON function.
-var tojson_obj = { toJSON: function() {
- counter++;
- return [1, 2];
- },
- a: 1};
-assertEquals('[1,2]', JSON.stringify(tojson_obj));
-assertEquals(2, counter);
-
-// Test that we don't recursively look for the toJSON function.
-var tojson_proto_obj = { a: 'fail' };
-tojson_proto_obj.__proto__ = { toJSON: function() {
- counter++;
- return tojson_obj;
- } };
-assertEquals('{"a":1}', JSON.stringify(tojson_proto_obj));
-
-// Test toJSON produced by a getter.
-var tojson_via_getter = { get toJSON() {
- return function(x) {
- counter++;
- return 321;
- };
- },
- a: 1 };
-assertEquals('321', JSON.stringify(tojson_via_getter));
-
-// Test toJSON with key.
-tojson_obj = { toJSON: function(key) { return key + key; } };
-var tojson_with_key_1 = { a: tojson_obj, b: tojson_obj };
-assertEquals('{"a":"aa","b":"bb"}', JSON.stringify(tojson_with_key_1));
-var tojson_with_key_2 = [ tojson_obj, tojson_obj ];
-assertEquals('["00","11"]', JSON.stringify(tojson_with_key_2));
-
-// Test toJSON with exception.
-var tojson_ex = { toJSON: function(key) { throw "123" } };
-assertThrows(function() { JSON.stringify(tojson_ex); });
-
-// Test toJSON with access to this.
-var obj = { toJSON: function(key) { return this.a + key; }, a: "x" };
-assertEquals('{"y":"xy"}', JSON.stringify({y: obj}));
-
-// Test holes in arrays.
-var fast_smi = [1, 2, 3, 4];
-fast_smi.__proto__ = [7, 7, 7, 7];
-delete fast_smi[2];
-assertTrue(%HasFastSmiElements(fast_smi));
-assertEquals("[1,2,7,4]", JSON.stringify(fast_smi));
-
-var fast_double = [1.1, 2, 3, 4];
-fast_double.__proto__ = [7, 7, 7, 7];
-
-delete fast_double[2];
-assertTrue(%HasFastDoubleElements(fast_double));
-assertEquals("[1.1,2,7,4]", JSON.stringify(fast_double));
-
-var fast_obj = [1, 2, {}, {}];
-fast_obj.__proto__ = [7, 7, 7, 7];
-
-delete fast_obj[2];
-assertTrue(%HasFastObjectElements(fast_obj));
-assertEquals("[1,2,7,{}]", JSON.stringify(fast_obj));
-
-var getter_side_effect = { a: 1,
- get b() {
- delete this.a;
- delete this.c;
- this.e = 5;
- return 2;
- },
- c: 3,
- d: 4 };
-assertEquals('{"a":1,"b":2,"d":4}', JSON.stringify(getter_side_effect));
-assertEquals('{"b":2,"d":4,"e":5}', JSON.stringify(getter_side_effect));
-
-var non_enum = {};
-non_enum.a = 1;
-Object.defineProperty(non_enum, "b", { value: 2, enumerable: false });
-non_enum.c = 3;
-assertEquals('{"a":1,"c":3}', JSON.stringify(non_enum));
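One subtlety in the deleted file is easy to miss: when a fast-elements array has a hole, serialization performs an ordinary indexed read, so the hole is filled from the prototype chain instead of being emitted as null. In isolation:

    // Sketch of the hole-through-prototype reads the deleted asserts covered.
    var arr = [1, 2, 3];
    arr.__proto__ = [7, 7, 7];
    delete arr[1];            // leaves a hole at index 1
    JSON.stringify(arr);      // '[1,7,3]' - the hole reads 7 from the proto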
diff --git a/deps/v8/test/mjsunit/manual-parallel-recompile.js b/deps/v8/test/mjsunit/manual-parallel-recompile.js
deleted file mode 100644
index 26b160537..000000000
--- a/deps/v8/test/mjsunit/manual-parallel-recompile.js
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --expose-gc
-// Flags: --parallel-recompilation --manual-parallel-recompilation
-
-function assertOptimized(fun) {
- // This assertion takes --always-opt and --nocrankshaft flags into account.
- assertTrue(%GetOptimizationStatus(fun) != 2);
-}
-
-function assertUnoptimized(fun) {
- assertTrue(%GetOptimizationStatus(fun) != 1);
-}
-
-function f(x) {
- var xx = x * x;
- var xxstr = xx.toString();
- return xxstr.length;
-}
-
-function g(x) {
- var xxx = Math.sqrt(x) | 0;
- var xxxstr = xxx.toString();
- return xxxstr.length;
-}
-
-function k(x) {
- return x * x;
-}
-
-f(g(1));
-f(g(2));
-assertUnoptimized(f);
-assertUnoptimized(g);
-
-%ForceParallelRecompile(f);
-%ForceParallelRecompile(g);
-assertUnoptimized(f);
-assertUnoptimized(g);
-
-var sum = 0;
-for (var i = 0; i < 10000; i++) sum += f(i) + g(i);
-gc();
-
-assertEquals(95274, sum);
-assertUnoptimized(f);
-assertUnoptimized(g);
-
-%InstallRecompiledCode(f);
-assertOptimized(f);
-assertUnoptimized(g);
-
-%InstallRecompiledCode(g);
-assertOptimized(g);
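The two helpers at the top of the deleted file encode the %GetOptimizationStatus convention of this era (an internal, unstable interface; the mapping below is inferred from the surrounding asserts, not documented API): 1 means optimized, 2 means not optimized, and further codes cover --always-opt and never-optimize modes, which is why both helpers compare with != rather than ==. Under that assumption:

    // Sketch; requires --allow-natives-syntax and this era's status codes.
    function isDefinitelyUnoptimized(fun) {
      return %GetOptimizationStatus(fun) == 2;  // only code 2 is a hard "no"
    }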
diff --git a/deps/v8/test/mjsunit/math-exp-precision.js b/deps/v8/test/mjsunit/math-exp-precision.js
deleted file mode 100644
index ace7edc58..000000000
--- a/deps/v8/test/mjsunit/math-exp-precision.js
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Tests that the --fast-math implementation of Math.exp() has
-// reasonable precision.
-
-function exp(x) {
- return Math.exp(x);
-}
-
-var first_call_result = exp(Math.PI);
-var second_call_result = exp(Math.PI);
-
-function assertAlmostEquals(expected, actual, x) {
- if (expected == 0 && actual == 0) return; // OK
- if (expected == Number.POSITIVE_INFINITY &&
- actual == Number.POSITIVE_INFINITY) {
- return; // OK
- }
- relative_diff = Math.abs(expected/actual - 1);
- assertTrue(relative_diff < 1e-12, "relative difference of " + relative_diff +
- " for input " + x);
-}
-
-var increment = Math.PI / 35; // Roughly 0.1, but we want to try many
- // different mantissae.
-for (var x = -708; x < 710; x += increment) {
- var ex = exp(x);
- var reference = Math.pow(Math.E, x);
- assertAlmostEquals(reference, ex, x);
- if (ex > 0 && isFinite(ex)) {
- var back = Math.log(ex);
- assertAlmostEquals(x, back, x + " (backwards)");
- }
-}
-
-// Make sure optimizing the function does not alter the result.
-var last_call_result = exp(Math.PI);
-assertEquals(first_call_result, second_call_result);
-assertEquals(first_call_result, last_call_result);
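The tolerance check in the deleted file compares relative rather than absolute error, which is what makes it meaningful across exp()'s enormous output range: near x = 700, Math.exp(x) is on the order of 1e304, so any fixed absolute tolerance would be useless, while |expected/actual - 1| < 1e-12 still demands agreement in roughly the first 12 significant digits. A worked instance with hypothetical values:

    // Worked instance of the relative-error metric used above (sketch).
    var expected = 1.0000000000001e304;  // hypothetical reference value
    var actual   = 1e304;                // hypothetical computed value
    Math.abs(expected / actual - 1);     // ~1e-13, inside the 1e-12 bound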
diff --git a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
index 734916585..274349084 100644
--- a/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
+++ b/deps/v8/test/mjsunit/math-floor-of-div-minus-zero.js
@@ -35,7 +35,6 @@ function test_div_no_deopt_minus_zero() {
}
test_div_no_deopt_minus_zero();
-test_div_no_deopt_minus_zero();
%OptimizeFunctionOnNextCall(test_div_no_deopt_minus_zero);
test_div_no_deopt_minus_zero();
assertTrue(2 != %GetOptimizationStatus(test_div_no_deopt_minus_zero));
diff --git a/deps/v8/test/mjsunit/mjsunit.status b/deps/v8/test/mjsunit/mjsunit.status
index 0bf378b47..037093bf7 100644
--- a/deps/v8/test/mjsunit/mjsunit.status
+++ b/deps/v8/test/mjsunit/mjsunit.status
@@ -44,7 +44,6 @@ regress/regress-524: SKIP
# Too slow in debug mode with --stress-opt
compiler/regress-stacktrace-methods: PASS, SKIP if $mode == debug
compiler/regress-funcaller: PASS, SKIP if $mode == debug
-regress/regress-2318: PASS, SKIP if $mode == debug
regress/regress-create-exception: PASS, SKIP if $mode == debug
##############################################################################
@@ -61,15 +60,6 @@ array-constructor: PASS || TIMEOUT
unicode-case-overoptimization: PASS, TIMEOUT if ($arch == arm || $arch == android_arm || $arch == mipsel)
##############################################################################
-# This test expects to reach a certain recursion depth, which may not work
-# for debug mode.
-json-recursive: PASS, (PASS || FAIL) if $mode == debug
-
-##############################################################################
-# Skip long running test that times out in debug mode.
-regress/regress-crbug-160010: PASS, SKIP if $mode == debug
-
-##############################################################################
# This test sets the umask on a per-process basis and hence cannot be
# used in multi-threaded runs.
# On android there is no /tmp directory.
diff --git a/deps/v8/test/mjsunit/regress/regress-121407.js b/deps/v8/test/mjsunit/regress/regress-121407.js
index 440370818..25033fb52 100644
--- a/deps/v8/test/mjsunit/regress/regress-121407.js
+++ b/deps/v8/test/mjsunit/regress/regress-121407.js
@@ -37,4 +37,4 @@ a[2000000] = 2000000;
a.length=2000;
for (var i = 0; i <= 256; i++) {
a[i] = new Object();
-}
+} \ No newline at end of file
diff --git a/deps/v8/test/mjsunit/regress/regress-164442.js b/deps/v8/test/mjsunit/regress/regress-164442.js
deleted file mode 100644
index 1160d874f..000000000
--- a/deps/v8/test/mjsunit/regress/regress-164442.js
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-// Should not take a very long time (n^2 algorithms are bad)
-
-
-function ensureNotNegative(x) {
- return Math.max(0, x | 0);
-}
-
-
-ensureNotNegative(1);
-ensureNotNegative(2);
-
-%OptimizeFunctionOnNextCall(ensureNotNegative);
-
-var r = ensureNotNegative(-1);
-
-assertEquals(0, r);
diff --git a/deps/v8/test/mjsunit/regress/regress-166553.js b/deps/v8/test/mjsunit/regress/regress-166553.js
deleted file mode 100644
index acaf34f4e..000000000
--- a/deps/v8/test/mjsunit/regress/regress-166553.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose_gc
-
-JSON.stringify(String.fromCharCode(1, -11).toString())
-gc();
-var s = String.fromCharCode(1, -11)
-assertEquals(65525, s.charCodeAt(1))
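The magic number in the deleted assertion comes from the ToUint16 coercion String.fromCharCode applies to each argument: -11 wraps modulo 2^16, giving 65536 - 11 = 65525. The test apparently guarded that this code unit survived the JSON.stringify/gc sequence above intact:

    // Worked coercion behind the 65525 above.
    String.fromCharCode(-11).charCodeAt(0);  // 65525 (= 65536 - 11)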
diff --git a/deps/v8/test/mjsunit/regress/regress-1692.js b/deps/v8/test/mjsunit/regress/regress-1692.js
index 32be87f98..06bd66cf7 100644
--- a/deps/v8/test/mjsunit/regress/regress-1692.js
+++ b/deps/v8/test/mjsunit/regress/regress-1692.js
@@ -82,7 +82,7 @@ var o = Object("string");
// Non-string property on String object.
o[10] = 42;
assertTrue(o.propertyIsEnumerable(10));
-assertTrue(o.propertyIsEnumerable(0));
+assertFalse(o.propertyIsEnumerable(0));
// Fast elements.
var o = [1,2,3,4,5];
diff --git a/deps/v8/test/mjsunit/regress/regress-1980.js b/deps/v8/test/mjsunit/regress/regress-1980.js
index d87ff4507..49dfd063b 100644
--- a/deps/v8/test/mjsunit/regress/regress-1980.js
+++ b/deps/v8/test/mjsunit/regress/regress-1980.js
@@ -34,7 +34,7 @@ for (var i = 0; i < invalid_this.length; i++) {
Error.prototype.toString.call(invalid_this[i]);
} catch (e) {
exception = true;
- assertEquals("Error.prototype.toString called on non-object", e.message);
+ assertTrue("called_on_non_object" == e.type);
}
assertTrue(exception);
}
diff --git a/deps/v8/test/mjsunit/regress/regress-2263.js b/deps/v8/test/mjsunit/regress/regress-2263.js
deleted file mode 100644
index 9a9db5877..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2263.js
+++ /dev/null
@@ -1,30 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var obj = { length: { valueOf: function(){ throw { type: "length" }}}};
-var sep = { toString: function(){ throw { type: "toString" }}};
-assertThrows("Array.prototype.join.call(obj, sep)", undefined, "length");
diff --git a/deps/v8/test/mjsunit/regress/regress-2315.js b/deps/v8/test/mjsunit/regress/regress-2315.js
deleted file mode 100644
index a3f9182c9..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2315.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-var foo = (function() {
- return eval("(function bar() { return 1; })");
-})();
-
-foo();
-foo();
-%OptimizeFunctionOnNextCall(foo);
-foo();
-
-// Function should be optimized now.
-assertTrue(%GetOptimizationStatus(foo) != 2);
diff --git a/deps/v8/test/mjsunit/regress/regress-2398.js b/deps/v8/test/mjsunit/regress/regress-2398.js
deleted file mode 100644
index 1c66e7f84..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2398.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-"use strict";
-
-var observed = false;
-
-var object = { get toString() { observed = true; } };
-Object.defineProperty(object, "ro", { value: 1 });
-
-try {
- object.ro = 2; // TypeError caused by trying to write to read-only.
-} catch (e) {
- e.message; // Forces formatting of the message object.
-}
-
-assertFalse(observed);
diff --git a/deps/v8/test/mjsunit/regress/regress-2410.js b/deps/v8/test/mjsunit/regress/regress-2410.js
deleted file mode 100644
index c16fd14cd..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2410.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Object.prototype should be ignored in Object.getOwnPropertyNames
-//
-// See http://code.google.com/p/v8/issues/detail?id=2410 for details.
-
-Object.defineProperty(Object.prototype,
- 'thrower',
- { get: function() { throw Error('bug') } });
-var obj = { thrower: 'local' };
-assertEquals(['thrower'], Object.getOwnPropertyNames(obj));
diff --git a/deps/v8/test/mjsunit/regress/regress-2416.js b/deps/v8/test/mjsunit/regress/regress-2416.js
deleted file mode 100644
index 02afeb9a5..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2416.js
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-assertFalse(2147483647 < -2147483648)
-assertFalse(2147483647 <= -2147483648)
-assertFalse(2147483647 == -2147483648)
-assertTrue(2147483647 >= -2147483648)
-assertTrue(2147483647 > -2147483648)
-
-assertTrue(-2147483648 < 2147483647)
-assertTrue(-2147483648 <= 2147483647)
-assertFalse(-2147483648 == 2147483647)
-assertFalse(-2147483648 >= 2147483647)
-assertFalse(-2147483648 > 2147483647)
-
-assertFalse(2147483647 < 2147483647)
-assertTrue(2147483647 <= 2147483647)
-assertTrue(2147483647 == 2147483647)
-assertTrue(2147483647 >= 2147483647)
-assertFalse(2147483647 > 2147483647)
-
-assertFalse(-2147483648 < -2147483648)
-assertTrue(-2147483648 <= -2147483648)
-assertTrue(-2147483648 == -2147483648)
-assertTrue(-2147483648 >= -2147483648)
-assertFalse(-2147483648 > -2147483648)
-
-
-assertFalse(1073741823 < -1073741824)
-assertFalse(1073741823 <= -1073741824)
-assertFalse(1073741823 == -1073741824)
-assertTrue(1073741823 >= -1073741824)
-assertTrue(1073741823 > -1073741824)
-
-assertTrue(-1073741824 < 1073741823)
-assertTrue(-1073741824 <= 1073741823)
-assertFalse(-1073741824 == 1073741823)
-assertFalse(-1073741824 >= 1073741823)
-assertFalse(-1073741824 > 1073741823)
-
-assertFalse(1073741823 < 1073741823)
-assertTrue(1073741823 <= 1073741823)
-assertTrue(1073741823 == 1073741823)
-assertTrue(1073741823 >= 1073741823)
-assertFalse(1073741823 > 1073741823)
-
-assertFalse(-1073741824 < -1073741824)
-assertTrue(-1073741824 <= -1073741824)
-assertTrue(-1073741824 == -1073741824)
-assertTrue(-1073741824 >= -1073741824)
-assertFalse(-1073741824 > -1073741824)
diff --git a/deps/v8/test/mjsunit/regress/regress-2433.js b/deps/v8/test/mjsunit/regress/regress-2433.js
deleted file mode 100644
index dfe7131b5..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2433.js
+++ /dev/null
@@ -1,36 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Transitioning from a PackedSmi to PackedDouble should fill the destination
-// with holes.
-//
-// See http://code.google.com/p/v8/issues/detail?id=2433 for details.
-
-arr = [];
-arr[0] = 0;
-arr[0] = 1.1;
-assertEquals(undefined, arr[1]);
diff --git a/deps/v8/test/mjsunit/regress/regress-2437.js b/deps/v8/test/mjsunit/regress/regress-2437.js
deleted file mode 100644
index c82293ae3..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2437.js
+++ /dev/null
@@ -1,156 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Summary of the spec: lastIndex is reset to 0 if
-// - a regexp fails to match, regardless of global or non-global.
-// - a global regexp is used in a function that returns multiple results,
-// such as String.prototype.replace or String.prototype.match, since it
-// repeats the regexp until it fails to match.
-// Otherwise lastIndex is only set when a global regexp matches, to the index
-// after the match.
-
-// Test Regexp.prototype.exec
-r = /a/;
-r.lastIndex = 1;
-r.exec("zzzz");
-assertEquals(0, r.lastIndex);
-
-// Test Regexp.prototype.test
-r = /a/;
-r.lastIndex = 1;
-r.test("zzzz");
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.match
-r = /a/;
-r.lastIndex = 1;
-"zzzz".match(r);
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.replace with atomic regexp and empty string.
-r = /a/;
-r.lastIndex = 1;
-"zzzz".replace(r, "");
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.replace with non-atomic regexp and empty string.
-r = /\d/;
-r.lastIndex = 1;
-"zzzz".replace(r, "");
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.replace with atomic regexp and non-empty string.
-r = /a/;
-r.lastIndex = 1;
-"zzzz".replace(r, "a");
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.replace with non-atomic regexp and non-empty string.
-r = /\d/;
-r.lastIndex = 1;
-"zzzz".replace(r, "a");
-assertEquals(0, r.lastIndex);
-
-// Test String.prototype.replace with replacement function
-r = /a/;
-r.lastIndex = 1;
-"zzzz".replace(r, function() { return ""; });
-assertEquals(0, r.lastIndex);
-
-// Regexp functions that return multiple results:
-// A global regexp always resets lastIndex regardless of whether it matches.
-r = /a/g;
-r.lastIndex = -1;
-"0123abcd".replace(r, "x");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = -1;
-"01234567".replace(r, "x");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = -1;
-"0123abcd".match(r);
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = -1;
-"01234567".match(r);
-assertEquals(0, r.lastIndex);
-
-// A non-global regexp resets lastIndex iff it does not match.
-r = /a/;
-r.lastIndex = -1;
-"0123abcd".replace(r, "x");
-assertEquals(-1, r.lastIndex);
-
-r.lastIndex = -1;
-"01234567".replace(r, "x");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = -1;
-"0123abcd".match(r);
-assertEquals(-1, r.lastIndex);
-
-r.lastIndex = -1;
-"01234567".match(r);
-assertEquals(0, r.lastIndex);
-
-// Also test RegExp.prototype.exec and RegExp.prototype.test
-r = /a/g;
-r.lastIndex = 1;
-r.exec("01234567");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = 1;
-r.exec("0123abcd");
-assertEquals(5, r.lastIndex);
-
-r = /a/;
-r.lastIndex = 1;
-r.exec("01234567");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = 1;
-r.exec("0123abcd");
-assertEquals(1, r.lastIndex);
-
-r = /a/g;
-r.lastIndex = 1;
-r.test("01234567");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = 1;
-r.test("0123abcd");
-assertEquals(5, r.lastIndex);
-
-r = /a/;
-r.lastIndex = 1;
-r.test("01234567");
-assertEquals(0, r.lastIndex);
-
-r.lastIndex = 1;
-r.test("0123abcd");
-assertEquals(1, r.lastIndex);
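A compact sketch of the rule table these asserts encode (ES5 semantics, which this V8 implements):

var re = /a/g;
re.lastIndex = 1;
re.exec("0123abcd");
console.log(re.lastIndex); // 5: a global match stores the index past "a"
re.exec("zzzz");
console.log(re.lastIndex); // 0: any failed match resets to 0

re = /a/;
re.lastIndex = 1;
re.exec("0123abcd");
console.log(re.lastIndex); // 1: a non-global match leaves lastIndex alone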
diff --git a/deps/v8/test/mjsunit/regress/regress-2438.js b/deps/v8/test/mjsunit/regress/regress-2438.js
deleted file mode 100644
index 3f4fd7df5..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2438.js
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-function testSideEffects(subject, re) {
- var counter = 0;
- var side_effect_object = { valueOf: function() { return counter++; } };
- re.lastIndex = side_effect_object;
- re.exec(subject);
- assertEquals(1, counter);
-
- re.lastIndex = side_effect_object;
- re.test(subject);
- assertEquals(2, counter);
-
- re.lastIndex = side_effect_object;
- subject.match(re);
- assertEquals(3, counter);
-
- re.lastIndex = side_effect_object;
- subject.replace(re, "");
- assertEquals(4, counter);
-}
-
-testSideEffects("zzzz", /a/);
-testSideEffects("zzzz", /a/g);
-testSideEffects("xaxa", /a/);
-testSideEffects("xaxa", /a/g);
-
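The point pinned down above: lastIndex is passed through ToInteger exactly once per call, for exec, test, match and replace alike, even when the regexp is non-global and the coerced value is then ignored. A minimal sketch:

var reads = 0;
var spy = { valueOf: function() { reads++; return 0; } };
var re = /a/;
re.lastIndex = spy;
re.exec("zzzz");
console.log(reads); // 1: valueOf observed exactly once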
diff --git a/deps/v8/test/mjsunit/regress/regress-2443.js b/deps/v8/test/mjsunit/regress/regress-2443.js
deleted file mode 100644
index 0800c45c0..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2443.js
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Number.prototype methods on non-Numbers.
-
-assertThrows(function() { Number.prototype.toExponential.call({}) },
- TypeError);
-
-assertThrows(function() { Number.prototype.toPrecision.call({}) },
- TypeError);
-
-assertThrows(function() { Number.prototype.toFixed.call({}) },
- TypeError);
-
-assertThrows(function() { Number.prototype.toString.call({}) },
- TypeError);
-
-assertThrows(function() { Number.prototype.toLocaleString.call({}) },
- TypeError);
-
-assertThrows(function() { Number.prototype.valueOf.call({}) },
-             TypeError);
-
-
-// Call on Number objects with custom valueOf method.
-
-var x_obj = new Number(1);
-x_obj.valueOf = function() { assertUnreachable(); };
-
-assertEquals("1.00e+0",
- Number.prototype.toExponential.call(x_obj, 2));
-
-assertEquals("1.0",
- Number.prototype.toPrecision.call(x_obj, 2));
-
-assertEquals("1.00",
- Number.prototype.toFixed.call(x_obj, 2));
-
-// Call on primitive numbers.
-assertEquals("1.00e+0",
- Number.prototype.toExponential.call(1, 2));
-
-assertEquals("1.0",
- Number.prototype.toPrecision.call(1, 2));
-
-assertEquals("1.00",
- Number.prototype.toFixed.call(1, 2));
-
-
-// toExponential and toPrecision do the following steps in order:
-// 1) convert the argument using ToInteger,
-// 2) check for a non-finite receiver, on which they return early,
-// 3) check the argument range and throw an exception if out of range.
-// Note that the last two steps are reversed for toFixed.
-// Luckily, the receiver is expected to be a number or number
-// wrapper, so that getting its value is not observable.
-
-var f_flag = false;
-var f_obj = { valueOf: function() { f_flag = true; return 1000; } };
-
-assertEquals("NaN",
- Number.prototype.toExponential.call(NaN, f_obj));
-assertTrue(f_flag);
-
-f_flag = false;
-assertEquals("Infinity",
- Number.prototype.toExponential.call(1/0, f_obj));
-assertTrue(f_flag);
-
-f_flag = false;
-assertEquals("-Infinity",
- Number.prototype.toExponential.call(-1/0, f_obj));
-assertTrue(f_flag);
-
-f_flag = false;
-assertEquals("NaN",
- Number.prototype.toPrecision.call(NaN, f_obj));
-assertTrue(f_flag);
-
-f_flag = false;
-assertEquals("Infinity",
- Number.prototype.toPrecision.call(1/0, f_obj));
-assertTrue(f_flag);
-
-f_flag = false;
-assertEquals("-Infinity",
- Number.prototype.toPrecision.call(-1/0, f_obj));
-assertTrue(f_flag);
-
-// The odd man out: toFixed.
-
-f_flag = false;
-assertThrows(function() { Number.prototype.toFixed.call(NaN, f_obj) },
- RangeError);
-assertTrue(f_flag);
-
-f_flag = false;
-assertThrows(function() { Number.prototype.toFixed.call(1/0, f_obj) },
- RangeError);
-assertTrue(f_flag);
-
-f_flag = false;
-assertThrows(function() { Number.prototype.toFixed.call(-1/0, f_obj) },
- RangeError);
-assertTrue(f_flag);
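A sketch of the ordering difference the test observes: toFixed checks the argument range before the non-finite-receiver check, so a NaN receiver still produces a RangeError once the argument coerces out of range, while toExponential returns "NaN" first:

var coerced = false;
var big = { valueOf: function() { coerced = true; return 1000; } };
console.log(Number.prototype.toExponential.call(NaN, big)); // "NaN"
try {
  Number.prototype.toFixed.call(NaN, big);
} catch (e) {
  console.log(e instanceof RangeError, coerced); // true true
}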
diff --git a/deps/v8/test/mjsunit/regress/regress-2444.js b/deps/v8/test/mjsunit/regress/regress-2444.js
deleted file mode 100644
index 8fb8d8b52..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2444.js
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-
-var flags;
-
-function resetFlags(size) {
- flags = Array(size);
- while (size--) flags[size] = 0;
-}
-
-function assertFlags(array) {
- assertArrayEquals(array, flags);
-}
-
-function object_factory(flag_index, value, expected_flags) {
- var obj = {};
- obj.valueOf = function() {
- assertFlags(expected_flags);
- flags[flag_index]++;
- return value;
- }
- return obj;
-}
-
-
-assertEquals(-Infinity, Math.max());
-
-resetFlags(1);
-assertEquals(NaN,
- Math.max(object_factory(0, NaN, [0])));
-assertFlags([1]);
-
-resetFlags(2);
-assertEquals(NaN,
- Math.max(object_factory(0, NaN, [0, 0]),
- object_factory(1, 0, [1, 0])));
-assertFlags([1, 1]);
-
-resetFlags(3);
-assertEquals(NaN,
- Math.max(object_factory(0, NaN, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, 1, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-resetFlags(3);
-assertEquals(NaN,
- Math.max(object_factory(0, 2, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, NaN, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-resetFlags(3);
-assertEquals(2,
- Math.max(object_factory(0, 2, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, 1, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-
-assertEquals(+Infinity, Math.min());
-
-resetFlags(1);
-assertEquals(NaN,
- Math.min(object_factory(0, NaN, [0])));
-assertFlags([1]);
-
-resetFlags(2);
-assertEquals(NaN,
- Math.min(object_factory(0, NaN, [0, 0]),
- object_factory(1, 0, [1, 0])));
-assertFlags([1, 1]);
-
-resetFlags(3);
-assertEquals(NaN,
- Math.min(object_factory(0, NaN, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, 1, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-resetFlags(3);
-assertEquals(NaN,
- Math.min(object_factory(0, 2, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, NaN, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-resetFlags(3);
-assertEquals(0,
- Math.min(object_factory(0, 2, [0, 0, 0]),
- object_factory(1, 0, [1, 0, 0]),
- object_factory(2, 1, [1, 1, 0])));
-assertFlags([1, 1, 1]);
-
-
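The invariant under test: Math.max and Math.min coerce every argument left to right with ToNumber, even after an earlier NaN has already decided the result. A sketch:

var order = [];
function probe(name, value) {
  return { valueOf: function() { order.push(name); return value; } };
}
console.log(Math.max(probe("a", NaN), probe("b", 0), probe("c", 1))); // NaN
console.log(order.join(","));                                        // a,b,c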
diff --git a/deps/v8/test/mjsunit/regress/regress-2489.js b/deps/v8/test/mjsunit/regress/regress-2489.js
deleted file mode 100644
index 882c4f794..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2489.js
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-"use strict";
-
-function f(a, b) {
- return g("c", "d");
-}
-
-function g(a, b) {
- g.constructor.apply(this, arguments);
-}
-
-g.constructor = function(a, b) {
- assertEquals("c", a);
- assertEquals("d", b);
-}
-
-f("a", "b");
-f("a", "b");
-%OptimizeFunctionOnNextCall(f);
-f("a", "b");
-g.x = "deopt";
-f("a", "b");
diff --git a/deps/v8/test/mjsunit/regress/regress-2499.js b/deps/v8/test/mjsunit/regress/regress-2499.js
deleted file mode 100644
index 52aad874d..000000000
--- a/deps/v8/test/mjsunit/regress/regress-2499.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-function foo(word, nBits) {
- return (word[1] >>> nBits) | (word[0] << (32 - nBits));
-}
-
-word = [0x1001, 0];
-
-var expected = foo(word, 1);
-foo(word, 1);
-%OptimizeFunctionOnNextCall(foo);
-var optimized = foo(word, 1);
-assertEquals(expected, optimized)
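One classic source of interpreter/optimizer disagreement in shift-heavy code like this (an assumption here, not stated by the test) is that JavaScript shift counts are masked to five bits:

console.log(1 << 32);   // 1: the count is taken mod 32
console.log(16 >>> 32); // 16, for the same reason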
diff --git a/deps/v8/test/mjsunit/regress/regress-492.js b/deps/v8/test/mjsunit/regress/regress-492.js
index 53b3195cf..a8b783b30 100644
--- a/deps/v8/test/mjsunit/regress/regress-492.js
+++ b/deps/v8/test/mjsunit/regress/regress-492.js
@@ -29,7 +29,7 @@
// This should not hit any asserts in debug mode on ARM.
function function_with_n_args(n) {
- var source = '(function f' + n + '(';
+ var source = '(function f(';
for (var arg = 0; arg < n; arg++) {
if (arg != 0) source += ',';
source += 'arg' + arg;
@@ -50,41 +50,3 @@ for (args = 500; args < 520; args++) {
for (args = 1019; args < 1041; args++) {
function_with_n_args(args);
}
-
-
-function foo(
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x,
- x, x, x, x, x, x, x, x, x, x, x, x, x, x, x, x
-) {}
-
-for (var i = 0; i < 10000; ++i) foo();
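Roughly what function_with_n_args builds and evals, e.g. for n = 3 (the tail of the generated source lies outside this hunk, so the closing part is assumed):

eval("(function f(arg0,arg1,arg2){})()");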
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-135066.js b/deps/v8/test/mjsunit/regress/regress-crbug-135066.js
index 35e9ff8c8..1aeca8b1a 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-135066.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-135066.js
@@ -29,27 +29,25 @@
var filler = "//" + new Array(1024).join('x');
// Test strict eval in global context.
-assertEquals(23, eval(
+eval(
"'use strict';" +
"var x = 23;" +
"var f = function bozo1() {" +
" return x;" +
"};" +
"assertSame(23, f());" +
- "f;" +
filler
-)());
+);
// Test default eval in strict context.
-assertEquals(42, (function() {
+(function() {
"use strict";
- return eval(
+ eval(
"var y = 42;" +
"var g = function bozo2() {" +
" return y;" +
"};" +
"assertSame(42, g());" +
- "g;" +
filler
- )();
-})());
+ );
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-157019.js b/deps/v8/test/mjsunit/regress/regress-crbug-157019.js
deleted file mode 100644
index 1c54089ff..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-157019.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax --nocrankshaft
-
-function makeConstructor() {
- return function() {
- this.a = 1;
- this.b = 2;
- };
-}
-
-var c1 = makeConstructor();
-var o1 = new c1();
-
-c1.prototype = {};
-
-for (var i = 0; i < 10; i++) {
- var o = new c1();
- for (var j = 0; j < 8; j++) {
- o["x" + j] = 0;
- }
-}
-
-var c2 = makeConstructor();
-var o2 = new c2();
-
-for (var i = 0; i < 50000; i++) {
- new c2();
-}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-157520.js b/deps/v8/test/mjsunit/regress/regress-crbug-157520.js
deleted file mode 100644
index 17081dfa5..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-157520.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --nocrankshaft
-
-(function(){
- var f = function(arg) {
- arg = 2;
- return arguments[0];
- };
- for (var i = 0; i < 50000; i++) {
- assertSame(2, f(1));
- }
-})();
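The behavior locked in here runs anywhere: in sloppy mode a named parameter aliases its arguments slot, so the write to arg is visible through arguments[0].

function f(arg) {
  arg = 2;
  return arguments[0];
}
console.log(f(1)); // 2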
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-158185.js b/deps/v8/test/mjsunit/regress/regress-crbug-158185.js
deleted file mode 100644
index 99f19c72f..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-158185.js
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-assertEquals("0023456",
- Object.keys(JSON.parse('{"0023456": 1}'))[0]);
-assertEquals("1234567890123",
- Object.keys(JSON.parse('{"1234567890123": 1}'))[0]);
-assertEquals("123456789ABCD",
- Object.keys(JSON.parse('{"123456789ABCD": 1}'))[0]);
-assertEquals("12A",
- Object.keys(JSON.parse('{"12A": 1}'))[0]);
-
-assertEquals(1, JSON.parse('{"0":1}')[0]);
-assertEquals(undefined, JSON.parse('{"00":1}')[0]);
-
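The rule under test: only a canonical array-index string is treated as an element key during JSON.parse; leading zeros or overlong digit runs stay as named string properties. Sketch:

var o = JSON.parse('{"0": 1, "00": 2, "0023456": 3}');
console.log(o[0]);              // 1: "0" is a canonical index
console.log(o["00"]);           // 2: "00" is a plain string key
console.log(Object.keys(o)[2]); // "0023456", kept verbatim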
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-160010.js b/deps/v8/test/mjsunit/regress/regress-crbug-160010.js
deleted file mode 100644
index 266e54532..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-160010.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var str = "a";
-for (var i = 0; i < 28; i++) {
- str += str;
-}
-JSON.stringify(str);
-
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-162085.js b/deps/v8/test/mjsunit/regress/regress-crbug-162085.js
deleted file mode 100644
index a53b2c998..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-162085.js
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Ensure extending an empty packed smi array with a double initializes the
-// array with holes.
-var a = [1,2,3];
-a.length = 0;
-a[0] = 1.4;
-assertEquals(1.4, a[0]);
-assertEquals(undefined, a[1]);
-assertEquals(undefined, a[2]);
-assertEquals(undefined, a[3]);
-
-// Ensure the double array grow stub initializes the array with holes.
-function grow_store(a,i,v) {
- a[i] = v;
-}
-
-var a2 = [1.3];
-grow_store(a2,1,1.4);
-a2.length = 0;
-grow_store(a2,0,1.5);
-assertEquals(1.5, a2[0]);
-assertEquals(undefined, a2[1]);
-assertEquals(undefined, a2[2]);
-assertEquals(undefined, a2[3]);
-
-// Check storing objects using the double grow stub.
-var a3 = [1.3];
-var o = {};
-grow_store(a3, 1, o);
-assertEquals(1.3, a3[0]);
-assertEquals(o, a3[1]);
-
-// Ensure the double array grow stub fills the array with holes on a smi store.
-function grow_store2(a,i,v) {
- a[i] = v;
-}
-
-var a4 = [1.3];
-grow_store2(a4,1,1.4);
-a4.length = 0;
-grow_store2(a4,0,1);
-assertEquals(1, a4[0]);
-assertEquals(undefined, a4[1]);
-assertEquals(undefined, a4[2]);
-assertEquals(undefined, a4[3]);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-170856.js b/deps/v8/test/mjsunit/regress/regress-crbug-170856.js
deleted file mode 100644
index 2e73b12ca..000000000
--- a/deps/v8/test/mjsunit/regress/regress-crbug-170856.js
+++ /dev/null
@@ -1,33 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-r = new RegExp("a");
-for (var i = 0; i < 100; i++) {
- r["abc" + i] = i;
-}
-"zzzz".replace(r, "");
-assertEquals(0, r.lastIndex);
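The stress here: a hundred ad-hoc properties push the regexp into dictionary (slow) property mode, and the lastIndex write after the failed match must still land. Sketch:

var r = /a/;
for (var i = 0; i < 100; i++) r["abc" + i] = i;
"zzzz".replace(r, "");
console.log(r.lastIndex); // 0: reset after a failed match (ES5)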
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-18639.js b/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
index 4f4bb7c79..23e225a4f 100644
--- a/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
+++ b/deps/v8/test/mjsunit/regress/regress-crbug-18639.js
@@ -27,12 +27,8 @@
// See http://crbug.com/18639
-try {
- toString = toString;
- __defineGetter__("z", (0).toLocaleString);
- z;
- z;
- ((0).toLocaleString)();
-} catch (e) {
- assertInstanceof(e, TypeError);
-} \ No newline at end of file
+toString = toString;
+__defineGetter__("z", (0).toLocaleString);
+z;
+z;
+((0).toLocaleString)();
diff --git a/deps/v8/test/mjsunit/regress/regress-delete-empty-double.js b/deps/v8/test/mjsunit/regress/regress-delete-empty-double.js
deleted file mode 100644
index f7af2b1e3..000000000
--- a/deps/v8/test/mjsunit/regress/regress-delete-empty-double.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --allow-natives-syntax
-
-a = [1.1,2.2,3.3];
-a.length = 1;
-delete a[1];
-
-assertTrue(%HasFastDoubleElements(a));
-assertFalse(%HasFastHoleyElements(a));
-
-delete a[0];
-
-assertTrue(%HasFastDoubleElements(a));
-assertTrue(%HasFastHoleyElements(a));
diff --git a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js b/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
deleted file mode 100644
index c0a71bf4a..000000000
--- a/deps/v8/test/mjsunit/regress/regress-json-stringify-gc.js
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var a = [];
-var new_space_string = "";
-for (var i = 0; i < 128; i++) {
- new_space_string += String.fromCharCode((Math.random() * 26 + 65) | 0);
-}
-for (var i = 0; i < 10000; i++) a.push(new_space_string);
-
-// At some point during the first stringify, allocation causes a GC and
-// new_space_string is moved to old space. Make sure that this does not
-// screw up reading from the correct location.
-json1 = JSON.stringify(a);
-json2 = JSON.stringify(a);
-assertTrue(json1 == json2, "GC caused JSON.stringify to fail.");
-
diff --git a/deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js b/deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
deleted file mode 100644
index aea9c73b2..000000000
--- a/deps/v8/test/mjsunit/regress/regress-observe-empty-double-array.js
+++ /dev/null
@@ -1,37 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-observation --allow-natives-syntax
-//
-// Test passes if it does not crash.
-
-arr = [1.1];
-Object.observe(arr, function(){});
-arr.length = 0;
-assertTrue(%HasFastDoubleElements(arr));
-// Should not crash
-arr.push(1.1);
diff --git a/deps/v8/test/mjsunit/shift-for-integer-div.js b/deps/v8/test/mjsunit/shift-for-integer-div.js
deleted file mode 100644
index 0fe126229..000000000
--- a/deps/v8/test/mjsunit/shift-for-integer-div.js
+++ /dev/null
@@ -1,59 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-function divp4(x) {
- return x / 4;
-}
-
-for (var i = 0; i < 10000; i+=4) {
- assertEquals(i >> 2, divp4(i));
-}
-
-assertEquals(0.5, divp4(2));
-
-function divn4(x) {
- return x / (-4);
-}
-
-for (var i = 0; i < 10000; i+=4) {
- assertEquals(-(i >> 2), divn4(i));
-}
-
-assertEquals(-0, divn4(0));
-
-
-function divn1(x) {
- return x / (-1);
-}
-
-for (var i = 0; i < 10000; i++) {
- assertEquals(-i, divn1(i));
-}
-
-var min_int = -(0x7FFFFFFF)-1;
-assertEquals(-min_int, divn1(min_int));
-
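The three blockers for rewriting x / c as a plain shift, each pinned by an assert above: inexact quotients, negative zero, and int32 overflow at -2^31 / -1. Sketch:

console.log(2 / 4);              // 0.5: a shift would truncate
console.log(1 / (0 / -4));       // -Infinity: the quotient is -0
var minInt = -(0x7FFFFFFF) - 1;  // -2^31
console.log(minInt / -1);        // 2147483648: overflows int32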
diff --git a/deps/v8/test/mjsunit/stack-traces-overflow.js b/deps/v8/test/mjsunit/stack-traces-overflow.js
deleted file mode 100644
index 7722e93bd..000000000
--- a/deps/v8/test/mjsunit/stack-traces-overflow.js
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-function rec1(a) { rec1(a+1); }
-function rec2(a) { rec3(a+1); }
-function rec3(a) { rec2(a+1); }
-
-// Test stack trace getter and setter.
-try {
- rec1(0);
-} catch (e) {
- assertTrue(e.stack.indexOf("rec1") > 0);
- e.stack = "123";
- assertEquals("123", e.stack);
-}
-
-// Test setter w/o calling the getter.
-try {
- rec2(0);
-} catch (e) {
- assertTrue(e.stack.indexOf("rec2") > 0);
- assertTrue(e.stack.indexOf("rec3") > 0);
- e.stack = "123";
- assertEquals("123", e.stack);
-}
-
-// Test getter to make sure setter does not affect the boilerplate.
-try {
- rec1(0);
-} catch (e) {
- assertTrue(e.stack.indexOf("rec1") > 0);
- assertInstanceof(e, RangeError);
-}
-
-
-// Check setting/getting stack property on the prototype chain.
-function testErrorPrototype(prototype) {
- var object = {};
- object.__proto__ = prototype;
- object.stack = "123";
- assertEquals("123", object.stack);
- assertTrue("123" != prototype.stack);
-}
-
-try {
- rec1(0);
-} catch (e) {
- e.stack;
- testErrorPrototype(e);
-}
-
-try {
- rec1(0);
-} catch (e) {
- testErrorPrototype(e);
-}
-
-try {
- throw new Error();
-} catch (e) {
- testErrorPrototype(e);
-}
-
-Error.stackTraceLimit = 3;
-try {
- rec1(0);
-} catch (e) {
- assertEquals(4, e.stack.split('\n').length);
-}
-
-Error.stackTraceLimit = 25.9;
-try {
- rec1(0);
-} catch (e) {
- assertEquals(26, e.stack.split('\n').length);
-}
-
-Error.stackTraceLimit = NaN;
-try {
- rec1(0);
-} catch (e) {
- assertEquals(1, e.stack.split('\n').length);
-}
-
-Error.stackTraceLimit = "not a number";
-try {
- rec1(0);
-} catch (e) {
- assertEquals(undefined, e.stack);
-}
-
-Error.stackTraceLimit = 3;
-Error = ""; // Overwrite Error in the global object.
-try {
- rec1(0);
-} catch (e) {
- assertEquals(4, e.stack.split('\n').length);
-}
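A sketch of the coercion rules the tail of this test pins down: a numeric limit yields that many frames plus the message line, NaN yields no frames, and a non-number suppresses stack capture entirely:

function deep(n) { deep(n + 1); }
Error.stackTraceLimit = 3;
try { deep(0); } catch (e) {
  console.log(e.stack.split("\n").length); // 4: message line + 3 frames
}
Error.stackTraceLimit = "not a number";
try { deep(0); } catch (e) {
  console.log(e.stack); // undefined
}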
diff --git a/deps/v8/test/mjsunit/strict-mode.js b/deps/v8/test/mjsunit/strict-mode.js
index 5fb404a79..9c9bdfd52 100644
--- a/deps/v8/test/mjsunit/strict-mode.js
+++ b/deps/v8/test/mjsunit/strict-mode.js
@@ -1141,9 +1141,9 @@ function CheckPillDescriptor(func, name) {
function strict() {
"use strict";
- return return_my_caller();
+ return_my_caller();
}
- assertSame(null, strict());
+ assertThrows(strict, TypeError);
function non_strict() {
return return_my_caller();
@@ -1155,57 +1155,32 @@ function CheckPillDescriptor(func, name) {
(function TestNonStrictFunctionCallerPill() {
function strict(n) {
"use strict";
- return non_strict(n);
+ non_strict(n);
}
function recurse(n, then) {
if (n > 0) {
- return recurse(n - 1, then);
+      recurse(n - 1, then);
} else {
return then();
}
}
function non_strict(n) {
- return recurse(n, function() { return non_strict.caller; });
+ recurse(n, function() { non_strict.caller; });
}
function test(n) {
- return recurse(n, function() { return strict(n); });
- }
-
- for (var i = 0; i < 10; i ++) {
- assertSame(null, test(i));
- }
-})();
-
-
-(function TestNonStrictFunctionCallerDescriptorPill() {
- function strict(n) {
- "use strict";
- return non_strict(n);
- }
-
- function recurse(n, then) {
- if (n > 0) {
- return recurse(n - 1, then);
- } else {
- return then();
+ try {
+ recurse(n, function() { strict(n); });
+ } catch(e) {
+ return e instanceof TypeError;
}
- }
-
- function non_strict(n) {
- return recurse(n, function() {
- return Object.getOwnPropertyDescriptor(non_strict, "caller").value;
- });
- }
-
- function test(n) {
- return recurse(n, function() { return strict(n); });
+ return false;
}
for (var i = 0; i < 10; i ++) {
- assertSame(null, test(i));
+ assertEquals(test(i), true);
}
})();
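The semantics this rewritten hunk asserts: reading .caller on a sloppy function while its caller is strict throws a TypeError (the "poison pill") instead of returning null. A sketch:

function sloppy() { return sloppy.caller; }
function strict() { "use strict"; return sloppy(); }
try {
  strict();
} catch (e) {
  console.log(e instanceof TypeError); // true
}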
diff --git a/deps/v8/test/mjsunit/string-natives.js b/deps/v8/test/mjsunit/string-natives.js
deleted file mode 100644
index b1ec87542..000000000
--- a/deps/v8/test/mjsunit/string-natives.js
+++ /dev/null
@@ -1,72 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc --allow-natives-syntax
-
-function test() {
- var s1 = %NewString(26, true);
- for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, i+65);
- assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
- s1 = %TruncateString(s1, 13);
- assertEquals("ABCDEFGHIJKLM", s1);
-
- var s2 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, i+65);
- assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
- s2 = %TruncateString(s1, 13);
- assertEquals("ABCDEFGHIJKLM", s2);
-
- var s3 = %NewString(26, false);
- for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s3, i, i+1000);
- for (i = 0; i < 26; i++) assertEquals(s3[i], String.fromCharCode(i+1000));
-
- var a = [];
- for (var i = 0; i < 1000; i++) {
- var s = %NewString(10000, i % 2 == 1);
- a.push(s);
- }
-
- gc();
-
- for (var i = 0; i < 1000; i++) {
- assertEquals(10000, a[i].length);
- a[i] = %TruncateString(a[i], 5000);
- }
-
- gc();
-
- for (var i = 0; i < 1000; i++) {
- assertEquals(5000, a[i].length);
- }
-}
-
-
-test();
-test();
-%OptimizeFunctionOnNextCall(test);
-test();
-
diff --git a/deps/v8/test/mjsunit/string-split.js b/deps/v8/test/mjsunit/string-split.js
index 1308244ca..d8412f0ee 100644
--- a/deps/v8/test/mjsunit/string-split.js
+++ b/deps/v8/test/mjsunit/string-split.js
@@ -66,23 +66,6 @@ assertArrayEquals(["div", "#i", "d", ".class"], "div#id.class".split(/(?=[d#.])/
assertArrayEquals(["a", "b", "c"], "abc".split(/(?=.)/));
-assertArrayEquals(["Wenige", "sind", "auserwählt."],
- "Wenige sind auserwählt.".split(" "));
-
-assertArrayEquals([], "Wenige sind auserwählt.".split(" ", 0));
-
-assertArrayEquals(["Wenige"], "Wenige sind auserwählt.".split(" ", 1));
-
-assertArrayEquals(["Wenige", "sind"], "Wenige sind auserwählt.".split(" ", 2));
-
-assertArrayEquals(["Wenige", "sind", "auserwählt."],
- "Wenige sind auserwählt.".split(" ", 3));
-
-assertArrayEquals(["Wenige sind auserw", "hlt."],
- "Wenige sind auserwählt.".split("ä"));
-
-assertArrayEquals(["Wenige sind ", "."],
- "Wenige sind auserwählt.".split("auserwählt"));
/* "ab".split(/((?=.))/)
*
diff --git a/deps/v8/test/mjsunit/testcfg.py b/deps/v8/test/mjsunit/testcfg.py
index c8b972c12..21139562e 100644
--- a/deps/v8/test/mjsunit/testcfg.py
+++ b/deps/v8/test/mjsunit/testcfg.py
@@ -57,10 +57,11 @@ class MjsunitTestSuite(testsuite.TestSuite):
def GetFlagsForTestCase(self, testcase, context):
source = self.GetSourceForTest(testcase)
- flags = [] + context.mode_flags
+ flags = []
flags_match = re.findall(FLAGS_PATTERN, source)
for match in flags_match:
flags += match.strip().split()
+ flags += context.mode_flags
files_list = [] # List of file names to append to command arguments.
files_match = FILES_PATTERN.search(source);
diff --git a/deps/v8/test/mjsunit/tools/tickprocessor-test.log b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
index 5ddad89a5..db8be79fa 100644
--- a/deps/v8/test/mjsunit/tools/tickprocessor-test.log
+++ b/deps/v8/test/mjsunit/tools/tickprocessor-test.log
@@ -2,24 +2,24 @@ shared-library,"shell",0x08048000,0x081ee000
shared-library,"/lib32/libm-2.7.so",0xf7db6000,0xf7dd9000
shared-library,"ffffe000-fffff000",0xffffe000,0xfffff000
profiler,"begin",1
-code-creation,Stub,0,0xf540a100,474,"CEntryStub"
-code-creation,Script,0,0xf541cd80,736,"exp.js"
-code-creation,Stub,0,0xf541d0e0,47,"RuntimeStub_Math_exp"
-code-creation,LazyCompile,0,0xf541d120,145,"exp native math.js:41"
+code-creation,Stub,0xf540a100,474,"CEntryStub"
+code-creation,Script,0xf541cd80,736,"exp.js"
+code-creation,Stub,0xf541d0e0,47,"RuntimeStub_Math_exp"
+code-creation,LazyCompile,0xf541d120,145,"exp native math.js:41"
function-creation,0xf441d280,0xf541d120
-code-creation,LoadIC,0,0xf541d280,117,"j"
-code-creation,LoadIC,0,0xf541d360,63,"i"
-tick,0x80f82d1,0xffdfe880,0,0,0,0,0xf541ce5c
-tick,0x80f89a1,0xffdfecf0,0,0,0,0,0xf541ce5c
-tick,0x8123b5c,0xffdff1a0,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0x8123b65,0xffdff1a0,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf541d2be,0xffdff1e4,0,0,0,0
-tick,0xf541d320,0xffdff1dc,0,0,0,0
-tick,0xf541d384,0xffdff1d8,0,0,0,0
-tick,0xf7db94da,0xffdff0ec,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7db951c,0xffdff0f0,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbc508,0xffdff14c,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7dbff21,0xffdff198,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xf7edec90,0xffdff0ec,0,0,0,0,0xf541d1a1,0xf541ceea
-tick,0xffffe402,0xffdff488,0,0,0,0
+code-creation,LoadIC,0xf541d280,117,"j"
+code-creation,LoadIC,0xf541d360,63,"i"
+tick,0x80f82d1,0xffdfe880,0,0,0,0xf541ce5c
+tick,0x80f89a1,0xffdfecf0,0,0,0,0xf541ce5c
+tick,0x8123b5c,0xffdff1a0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0x8123b65,0xffdff1a0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf541d2be,0xffdff1e4,0,0,0
+tick,0xf541d320,0xffdff1dc,0,0,0
+tick,0xf541d384,0xffdff1d8,0,0,0
+tick,0xf7db94da,0xffdff0ec,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7db951c,0xffdff0f0,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbc508,0xffdff14c,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7dbff21,0xffdff198,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xf7edec90,0xffdff0ec,0,0,0,0xf541d1a1,0xf541ceea
+tick,0xffffe402,0xffdff488,0,0,0
profiler,"end"
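
This fixture is regenerated for the rolled-back log format rather than edited for content: code-creation entries drop the numeric code-kind column that newer V8 had added (leaving event, type, address, size, quoted name), and tick entries drop one zero column, the ns_since_start field, in step with the parser changes to tools/tickprocessor.js at the end of this diff. A rough reading of the old shapes, sketched in modern Python and assuming plain CSV with quoted names:

    import csv
    import io

    LOG = ('code-creation,Stub,0xf540a100,474,"CEntryStub"\n'
           'tick,0x80f82d1,0xffdfe880,0,0,0,0xf541ce5c\n')

    for row in csv.reader(io.StringIO(LOG)):
        event, fields = row[0], row[1:]
        if event == "code-creation":
            # type, address, size, name; no code-kind column in 3.14.5
            print(event, fields[0], int(fields[1], 16), int(fields[2]), fields[3])
        elif event == "tick":
            # pc, sp, is_external_callback, tos_or_external_callback,
            # vmState, then var-args stack addresses
            print(event, int(fields[0], 16), int(fields[1], 16), fields[5:])
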
diff --git a/deps/v8/test/mjsunit/uri.js b/deps/v8/test/mjsunit/uri.js
index fae349f43..178ff1f2a 100644
--- a/deps/v8/test/mjsunit/uri.js
+++ b/deps/v8/test/mjsunit/uri.js
@@ -76,15 +76,3 @@ assertEquals(cc8_2, decodeURI(encodeURI(s8)).charCodeAt(1));
assertEquals(cc9_1, decodeURI(encodeURI(s9)).charCodeAt(0));
assertEquals(cc9_2, decodeURI(encodeURI(s9)).charCodeAt(1));
assertEquals(cc10, decodeURI(encodeURI(s10)).charCodeAt(0));
-
-assertEquals("", decodeURI(""));
-assertEquals("", encodeURI(""));
-
-function test(string) {
- assertEquals(string, decodeURI(encodeURI(string)));
-}
-
-test("\u1234\u0123\uabcd");
-test("abcd");
-test("ab<\u1234\u0123");
-test("ab\u1234<\u0123");
diff --git a/deps/v8/test/mozilla/mozilla.status b/deps/v8/test/mozilla/mozilla.status
index f915459d6..4f2fbdea5 100644
--- a/deps/v8/test/mozilla/mozilla.status
+++ b/deps/v8/test/mozilla/mozilla.status
@@ -212,9 +212,6 @@ js1_5/Array/regress-101964: PASS || FAIL if $mode == debug
# This section is for tests that fail in both V8 and JSC. Thus they
# have been determined to be incompatible between Mozilla and V8/JSC.
-# toPrecision argument restricted to range 1..21 in JSC/V8 and ECMA-262
-js1_5/Regress/regress-452346: FAIL_OK
-
# Fail because it calls builtins as functions and do not expect the
# builtin to have undefined as the receiver.
ecma/String/15.5.4.6-2: FAIL_OK
@@ -248,6 +245,13 @@ js1_5/Function/regress-338121-03: FAIL_OK
# Expectes 'prototype' property of functions to be enumerable.
js1_5/Function/10.1.6-01: FAIL_OK
+# toPrecision argument restricted to range 1..21 in JSC/V8
+js1_5/Regress/regress-452346: FAIL_OK
+ecma_3/Number/15.7.4.7-1: FAIL_OK
+
+# toExponential argument restricted to range 0..20 in JSC/V8
+ecma_3/Number/15.7.4.6-1: FAIL_OK
+
#:=== RegExp:===
# We don't match the syntax error message of Mozilla for invalid
# RegExp flags.
diff --git a/deps/v8/test/test262/README b/deps/v8/test/test262/README
index 1ddbc709b..59e7f5eb8 100644
--- a/deps/v8/test/test262/README
+++ b/deps/v8/test/test262/README
@@ -4,11 +4,11 @@ tests from
http://hg.ecmascript.org/tests/test262
-at revision 360 as 'data' in this directory. Using later version
+at revision 334 as 'data' in this directory. Using later version
may be possible but the tests are only known to pass (and indeed run)
with that revision.
-hg clone -r 360 http://hg.ecmascript.org/tests/test262 data
+hg clone -r 334 http://hg.ecmascript.org/tests/test262 data
If you do update to a newer revision you may have to change the test
harness adapter code since it uses internal functionality from the
diff --git a/deps/v8/test/test262/test262.status b/deps/v8/test/test262/test262.status
index 8eaa3657f..06b43c717 100644
--- a/deps/v8/test/test262/test262.status
+++ b/deps/v8/test/test262/test262.status
@@ -39,20 +39,14 @@ S15.12.2_A1: FAIL
# V8 Bug: http://code.google.com/p/v8/issues/detail?id=691
11.2.3-3_3: FAIL
-# Strings that are considered canonically equivalent by the Unicode standard
-# return a non-zero value on String.prototype.localeCompare calls.
-# V8 Bug: http://code.google.com/p/v8/issues/detail?id=2413
-15.5.4.9_CE: FAIL
-
##################### DELIBERATE INCOMPATIBILITIES #####################
-# This tests precision of Math functions. The implementation for those
+# This tests precision of Math.tan and Math.sin. The implementation for those
# trigonometric functions are platform/compiler dependent. Furthermore, the
# expectation values by far deviates from the actual result given by an
# arbitrary-precision calculator, making those tests partly bogus.
-S15.8.2.8_A6: PASS || FAIL_OK # Math.exp (less precise with --fast-math)
-S15.8.2.16_A7: PASS || FAIL_OK # Math.sin
-S15.8.2.18_A7: PASS || FAIL_OK # Math.tan
+S15.8.2.16_A7: PASS || FAIL_OK
+S15.8.2.18_A7: PASS || FAIL_OK
# Linux for ia32 (and therefore simulators) default to extended 80 bit floating
# point formats, so these tests checking 64-bit FP precision fail. The other
diff --git a/deps/v8/test/test262/testcfg.py b/deps/v8/test/test262/testcfg.py
index f937442f5..875a4e5ed 100644
--- a/deps/v8/test/test262/testcfg.py
+++ b/deps/v8/test/test262/testcfg.py
@@ -36,11 +36,10 @@ from testrunner.local import testsuite
from testrunner.objects import testcase
-TEST_262_ARCHIVE_REVISION = "53c4ade82d14" # This is the r360 revision.
-TEST_262_ARCHIVE_MD5 = "5fa4918b00e5d60e57bdd3c05deaeb0c"
+TEST_262_ARCHIVE_REVISION = "fb327c439e20" # This is the r334 revision.
+TEST_262_ARCHIVE_MD5 = "307acd166ec34629592f240dc12d57ed"
TEST_262_URL = "http://hg.ecmascript.org/tests/test262/archive/%s.tar.bz2"
-TEST_262_HARNESS = ["sta.js", "testBuiltInObject.js"]
-TEST_262_SKIP = ["intl402"]
+TEST_262_HARNESS = ["sta.js"]
class Test262TestSuite(testsuite.TestSuite):
@@ -60,8 +59,6 @@ class Test262TestSuite(testsuite.TestSuite):
for dirname, dirs, files in os.walk(self.testroot):
for dotted in [x for x in dirs if x.startswith(".")]:
dirs.remove(dotted)
- for skipped in [x for x in dirs if x in TEST_262_SKIP]:
- dirs.remove(skipped)
dirs.sort()
files.sort()
for filename in files:
@@ -179,8 +176,6 @@ class Test262TestConfiguration(test.TestConfiguration):
for root, dirs, files in os.walk(testroot):
for dotted in [x for x in dirs if x.startswith('.')]:
dirs.remove(dotted)
- for skipped in [x for x in dirs if x in TEST_262_SKIP]:
- dirs.remove(skipped)
dirs.sort()
root_path = root[len(self.root):].split(os.path.sep)
root_path = current_path + [x for x in root_path if x]
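
Both copies of the walker lose their TEST_262_SKIP pruning because the r334 snapshot predates the intl402 suite that the newer harness had to skip. The deleted pattern relies on a documented os.walk property: mutating the dirs list in place controls which subdirectories get visited. A minimal sketch of that idiom:

    import os

    SKIP = set([".git", "intl402"])  # intl402 is the suite the newer harness skipped

    def walk_tests(root):
        for dirname, dirs, files in os.walk(root):
            # os.walk consults `dirs` after each yield, so in-place removal
            # (or slice assignment) prunes the directories it descends into.
            dirs[:] = sorted(d for d in dirs if d not in SKIP and not d.startswith("."))
            for filename in sorted(files):
                if filename.endswith(".js"):
                    yield os.path.join(dirname, filename)
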
diff --git a/deps/v8/tools/gen-postmortem-metadata.py b/deps/v8/tools/gen-postmortem-metadata.py
index 0acb658c5..f59cfd303 100644
--- a/deps/v8/tools/gen-postmortem-metadata.py
+++ b/deps/v8/tools/gen-postmortem-metadata.py
@@ -61,7 +61,7 @@ consts_misc = [
{ 'name': 'StringEncodingMask', 'value': 'kStringEncodingMask' },
{ 'name': 'TwoByteStringTag', 'value': 'kTwoByteStringTag' },
- { 'name': 'AsciiStringTag', 'value': 'kOneByteStringTag' },
+ { 'name': 'AsciiStringTag', 'value': 'kAsciiStringTag' },
{ 'name': 'StringRepresentationMask',
'value': 'kStringRepresentationMask' },
@@ -76,15 +76,16 @@ consts_misc = [
{ 'name': 'SmiTag', 'value': 'kSmiTag' },
{ 'name': 'SmiTagMask', 'value': 'kSmiTagMask' },
{ 'name': 'SmiValueShift', 'value': 'kSmiTagSize' },
- { 'name': 'SmiShiftSize', 'value': 'kSmiShiftSize' },
{ 'name': 'PointerSizeLog2', 'value': 'kPointerSizeLog2' },
+ { 'name': 'prop_idx_transitions',
+ 'value': 'DescriptorArray::kTransitionsIndex' },
{ 'name': 'prop_idx_first',
'value': 'DescriptorArray::kFirstIndex' },
{ 'name': 'prop_type_field',
'value': 'FIELD' },
{ 'name': 'prop_type_first_phantom',
- 'value': 'TRANSITION' },
+ 'value': 'MAP_TRANSITION' },
{ 'name': 'prop_type_mask',
'value': 'PropertyDetails::TypeField::kMask' },
@@ -106,13 +107,14 @@ extras_accessors = [
'JSObject, elements, Object, kElementsOffset',
'FixedArray, data, uintptr_t, kHeaderSize',
'Map, instance_attributes, int, kInstanceAttributesOffset',
+ 'Map, instance_descriptors, int, kInstanceDescriptorsOrBitField3Offset',
'Map, inobject_properties, int, kInObjectPropertiesOffset',
'Map, instance_size, int, kInstanceSizeOffset',
'HeapNumber, value, double, kValueOffset',
'ConsString, first, String, kFirstOffset',
'ConsString, second, String, kSecondOffset',
'ExternalString, resource, Object, kResourceOffset',
- 'SeqOneByteString, chars, char, kHeaderSize',
+ 'SeqAsciiString, chars, char, kHeaderSize',
'SharedFunctionInfo, code, Code, kCodeOffset',
'Code, instruction_start, uintptr_t, kHeaderSize',
'Code, instruction_size, int, kInstructionSizeOffset',
@@ -126,7 +128,7 @@ extras_accessors = [
expected_classes = [
'ConsString', 'FixedArray', 'HeapNumber', 'JSArray', 'JSFunction',
'JSObject', 'JSRegExp', 'JSValue', 'Map', 'Oddball', 'Script',
- 'SeqOneByteString', 'SharedFunctionInfo'
+ 'SeqAsciiString', 'SharedFunctionInfo'
];
@@ -291,7 +293,7 @@ def load_objects():
cctype.find('Sliced') == -1):
if (cctype.find('Ascii') != -1):
cctype = re.sub('AsciiString$',
- 'SeqOneByteString', cctype);
+ 'SeqAsciiString', cctype);
else:
cctype = re.sub('String$',
'SeqString', cctype);
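
The postmortem generator reverts in lockstep with the heap layout: SeqOneByteString becomes SeqAsciiString again, kOneByteStringTag becomes kAsciiStringTag, the SmiShiftSize constant disappears, and the 3.14-era transition constants return, so debuggers built against the generated metadata see names that match this V8. The script turns these tables into plain C globals; the exact emitted form lives outside this hunk, so the following is only a shape sketch with hypothetical output:

    consts_misc = [
        {'name': 'AsciiStringTag', 'value': 'kAsciiStringTag'},
        {'name': 'SmiTag',         'value': 'kSmiTag'},
    ]

    # Hypothetical emitter: consumers such as mdb_v8 look these constants up
    # as symbols in the built library; only the shape is sketched here.
    for const in consts_misc:
        print('int v8dbg_%s = %s;' % (const['name'], const['value']))
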
diff --git a/deps/v8/tools/grokdump.py b/deps/v8/tools/grokdump.py
index b51e4e0e7..46ead5e46 100755
--- a/deps/v8/tools/grokdump.py
+++ b/deps/v8/tools/grokdump.py
@@ -863,83 +863,80 @@ INSTANCE_TYPES = {
# }
# printf("}\n");
KNOWN_MAPS = {
- 0x08081: (128, "MetaMap"),
- 0x080a5: (163, "FixedArrayMap"),
- 0x080c9: (130, "OddballMap"),
- 0x080ed: (163, "FixedCOWArrayMap"),
- 0x08111: (163, "ScopeInfoMap"),
- 0x08135: (132, "HeapNumberMap"),
- 0x08159: (133, "ForeignMap"),
- 0x0817d: (64, "SymbolMap"),
- 0x081a1: (68, "AsciiSymbolMap"),
- 0x081c5: (65, "ConsSymbolMap"),
- 0x081e9: (69, "ConsAsciiSymbolMap"),
- 0x0820d: (66, "ExternalSymbolMap"),
- 0x08231: (74, "ExternalSymbolWithAsciiDataMap"),
- 0x08255: (70, "ExternalAsciiSymbolMap"),
- 0x08279: (82, "ShortExternalSymbolMap"),
- 0x0829d: (90, "ShortExternalSymbolWithAsciiDataMap"),
- 0x082c1: (86, "ShortExternalAsciiSymbolMap"),
- 0x082e5: (0, "StringMap"),
- 0x08309: (4, "AsciiStringMap"),
- 0x0832d: (1, "ConsStringMap"),
- 0x08351: (5, "ConsAsciiStringMap"),
- 0x08375: (3, "SlicedStringMap"),
- 0x08399: (7, "SlicedAsciiStringMap"),
- 0x083bd: (2, "ExternalStringMap"),
- 0x083e1: (10, "ExternalStringWithAsciiDataMap"),
- 0x08405: (6, "ExternalAsciiStringMap"),
- 0x08429: (18, "ShortExternalStringMap"),
- 0x0844d: (26, "ShortExternalStringWithAsciiDataMap"),
- 0x08471: (22, "ShortExternalAsciiStringMap"),
- 0x08495: (0, "UndetectableStringMap"),
- 0x084b9: (4, "UndetectableAsciiStringMap"),
- 0x084dd: (145, "FixedDoubleArrayMap"),
- 0x08501: (134, "ByteArrayMap"),
- 0x08525: (135, "FreeSpaceMap"),
- 0x08549: (144, "ExternalPixelArrayMap"),
- 0x0856d: (136, "ExternalByteArrayMap"),
- 0x08591: (137, "ExternalUnsignedByteArrayMap"),
- 0x085b5: (138, "ExternalShortArrayMap"),
- 0x085d9: (139, "ExternalUnsignedShortArrayMap"),
- 0x085fd: (140, "ExternalIntArrayMap"),
- 0x08621: (141, "ExternalUnsignedIntArrayMap"),
- 0x08645: (142, "ExternalFloatArrayMap"),
- 0x08669: (163, "NonStrictArgumentsElementsMap"),
- 0x0868d: (143, "ExternalDoubleArrayMap"),
- 0x086b1: (129, "CodeMap"),
- 0x086d5: (131, "GlobalPropertyCellMap"),
- 0x086f9: (146, "OnePointerFillerMap"),
- 0x0871d: (146, "TwoPointerFillerMap"),
- 0x08741: (147, "AccessorInfoMap"),
- 0x08765: (148, "AccessorPairMap"),
- 0x08789: (149, "AccessCheckInfoMap"),
- 0x087ad: (150, "InterceptorInfoMap"),
- 0x087d1: (151, "CallHandlerInfoMap"),
- 0x087f5: (152, "FunctionTemplateInfoMap"),
- 0x08819: (153, "ObjectTemplateInfoMap"),
- 0x0883d: (154, "SignatureInfoMap"),
- 0x08861: (155, "TypeSwitchInfoMap"),
- 0x08885: (156, "ScriptMap"),
- 0x088a9: (157, "CodeCacheMap"),
- 0x088cd: (158, "PolymorphicCodeCacheMap"),
- 0x088f1: (159, "TypeFeedbackInfoMap"),
- 0x08915: (160, "AliasedArgumentsEntryMap"),
- 0x08939: (161, "DebugInfoMap"),
- 0x0895d: (162, "BreakPointInfoMap"),
- 0x08981: (163, "HashTableMap"),
- 0x089a5: (163, "FunctionContextMap"),
- 0x089c9: (163, "CatchContextMap"),
- 0x089ed: (163, "WithContextMap"),
- 0x08a11: (163, "BlockContextMap"),
- 0x08a35: (163, "ModuleContextMap"),
- 0x08a59: (163, "GlobalContextMap"),
- 0x08a7d: (163, "NativeContextMap"),
- 0x08aa1: (164, "SharedFunctionInfoMap"),
- 0x08ac5: (165, "JSMessageObjectMap"),
- 0x08ae9: (170, "ExternalMap"),
- 0x08b0d: (170, "NeanderMap"),
- 0x08b31: (170, ""),
+ 0x08081: (134, "ByteArrayMap"),
+ 0x080a1: (128, "MetaMap"),
+ 0x080c1: (130, "OddballMap"),
+ 0x080e1: (163, "FixedArrayMap"),
+ 0x08101: (68, "AsciiSymbolMap"),
+ 0x08121: (132, "HeapNumberMap"),
+ 0x08141: (135, "FreeSpaceMap"),
+ 0x08161: (146, "OnePointerFillerMap"),
+ 0x08181: (146, "TwoPointerFillerMap"),
+ 0x081a1: (131, "GlobalPropertyCellMap"),
+ 0x081c1: (164, "SharedFunctionInfoMap"),
+ 0x081e1: (4, "AsciiStringMap"),
+ 0x08201: (163, "GlobalContextMap"),
+ 0x08221: (129, "CodeMap"),
+ 0x08241: (163, "ScopeInfoMap"),
+ 0x08261: (163, "FixedCOWArrayMap"),
+ 0x08281: (145, "FixedDoubleArrayMap"),
+ 0x082a1: (163, "HashTableMap"),
+ 0x082c1: (0, "StringMap"),
+ 0x082e1: (64, "SymbolMap"),
+ 0x08301: (1, "ConsStringMap"),
+ 0x08321: (5, "ConsAsciiStringMap"),
+ 0x08341: (3, "SlicedStringMap"),
+ 0x08361: (7, "SlicedAsciiStringMap"),
+ 0x08381: (65, "ConsSymbolMap"),
+ 0x083a1: (69, "ConsAsciiSymbolMap"),
+ 0x083c1: (66, "ExternalSymbolMap"),
+ 0x083e1: (74, "ExternalSymbolWithAsciiDataMap"),
+ 0x08401: (70, "ExternalAsciiSymbolMap"),
+ 0x08421: (2, "ExternalStringMap"),
+ 0x08441: (10, "ExternalStringWithAsciiDataMap"),
+ 0x08461: (6, "ExternalAsciiStringMap"),
+ 0x08481: (82, "ShortExternalSymbolMap"),
+ 0x084a1: (90, "ShortExternalSymbolWithAsciiDataMap"),
+ 0x084c1: (86, "ShortExternalAsciiSymbolMap"),
+ 0x084e1: (18, "ShortExternalStringMap"),
+ 0x08501: (26, "ShortExternalStringWithAsciiDataMap"),
+ 0x08521: (22, "ShortExternalAsciiStringMap"),
+ 0x08541: (0, "UndetectableStringMap"),
+ 0x08561: (4, "UndetectableAsciiStringMap"),
+ 0x08581: (144, "ExternalPixelArrayMap"),
+ 0x085a1: (136, "ExternalByteArrayMap"),
+ 0x085c1: (137, "ExternalUnsignedByteArrayMap"),
+ 0x085e1: (138, "ExternalShortArrayMap"),
+ 0x08601: (139, "ExternalUnsignedShortArrayMap"),
+ 0x08621: (140, "ExternalIntArrayMap"),
+ 0x08641: (141, "ExternalUnsignedIntArrayMap"),
+ 0x08661: (142, "ExternalFloatArrayMap"),
+ 0x08681: (143, "ExternalDoubleArrayMap"),
+ 0x086a1: (163, "NonStrictArgumentsElementsMap"),
+ 0x086c1: (163, "FunctionContextMap"),
+ 0x086e1: (163, "CatchContextMap"),
+ 0x08701: (163, "WithContextMap"),
+ 0x08721: (163, "BlockContextMap"),
+ 0x08741: (163, "ModuleContextMap"),
+ 0x08761: (165, "JSMessageObjectMap"),
+ 0x08781: (133, "ForeignMap"),
+ 0x087a1: (170, "NeanderMap"),
+ 0x087c1: (158, "PolymorphicCodeCacheMap"),
+ 0x087e1: (156, "ScriptMap"),
+ 0x08801: (147, "AccessorInfoMap"),
+ 0x08821: (148, "AccessorPairMap"),
+ 0x08841: (149, "AccessCheckInfoMap"),
+ 0x08861: (150, "InterceptorInfoMap"),
+ 0x08881: (151, "CallHandlerInfoMap"),
+ 0x088a1: (152, "FunctionTemplateInfoMap"),
+ 0x088c1: (153, "ObjectTemplateInfoMap"),
+ 0x088e1: (154, "SignatureInfoMap"),
+ 0x08901: (155, "TypeSwitchInfoMap"),
+ 0x08921: (157, "CodeCacheMap"),
+ 0x08941: (159, "TypeFeedbackInfoMap"),
+ 0x08961: (160, "AliasedArgumentsEntryMap"),
+ 0x08981: (161, "DebugInfoMap"),
+ 0x089a1: (162, "BreakPointInfoMap"),
}
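
KNOWN_MAPS is a generated table keying the low bits of each well-known map's address in a fresh heap to its (instance type, class name) pair; it is regenerated wholesale here because the rollback changes both the set of maps and their stride, with entries now 0x20 bytes apart instead of 0x24, consistent with a smaller 3.14.5 Map object. A sketch of the kind of lookup grokdump can then perform on a tagged pointer; the 1-bit heap-object tag is real, but the mask below is an illustrative simplification rather than the tool's exact logic:

    KNOWN_MAPS = {
        0x08081: (134, 'ByteArrayMap'),
        0x080a1: (128, 'MetaMap'),
        # ... remaining entries as in the table above
    }

    def identify_map(tagged):
        untagged = tagged & ~1                      # strip the heap-object tag bit
        return KNOWN_MAPS.get(untagged & 0x7ffff)   # illustrative mask

    print(identify_map(0x080a1 | 1))                # (128, 'MetaMap')
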
diff --git a/deps/v8/tools/gyp/v8.gyp b/deps/v8/tools/gyp/v8.gyp
index aad07c709..7a54ef4fc 100644
--- a/deps/v8/tools/gyp/v8.gyp
+++ b/deps/v8/tools/gyp/v8.gyp
@@ -69,14 +69,6 @@
],
},
'conditions': [
- ['OS=="android"', {
- 'libraries': [
- '-llog',
- ],
- 'include_dirs': [
- 'src/common/android/include',
- ],
- }],
['OS=="mac"', {
'xcode_settings': {
'OTHER_LDFLAGS': ['-dynamiclib', '-all_load']
@@ -361,7 +353,6 @@
'../../src/isolate.cc',
'../../src/isolate.h',
'../../src/json-parser.h',
- '../../src/json-stringifier.h',
'../../src/jsregexp.cc',
'../../src/jsregexp.h',
'../../src/lazy-instance.h',
@@ -788,7 +779,6 @@
'../../src/macros.py',
'../../src/proxy.js',
'../../src/collection.js',
- '../../src/object-observe.js'
],
},
'actions': [
diff --git a/deps/v8/tools/ll_prof.py b/deps/v8/tools/ll_prof.py
index 216929d1e..3afe179d2 100755
--- a/deps/v8/tools/ll_prof.py
+++ b/deps/v8/tools/ll_prof.py
@@ -45,7 +45,7 @@ USAGE="""usage: %prog [OPTION]...
Analyses V8 and perf logs to produce profiles.
Perf logs can be collected using a command like:
- $ perf record -R -e cycles -c 10000 -f -i ./d8 bench.js --ll-prof
+ $ perf record -R -e cycles -c 10000 -f -i ./shell bench.js --ll-prof
# -R: collect all data
# -e cycles: use cpu-cycles event (run "perf list" for details)
# -c 10000: write a sample after each 10000 events
@@ -54,16 +54,6 @@ Perf logs can be collected using a command like:
# --ll-prof shell flag enables the right V8 logs
This will produce a binary trace file (perf.data) that %prog can analyse.
-IMPORTANT:
- The kernel has an internal maximum for events per second, it is 100K by
- default. That's not enough for "-c 10000". Set it to some higher value:
- $ echo 10000000 | sudo tee /proc/sys/kernel/perf_event_max_sample_rate
- You can also make the warning about kernel address maps go away:
- $ echo 0 | sudo tee /proc/sys/kernel/kptr_restrict
-
-We have a convenience script that handles all of the above for you:
- $ tools/run-llprof.sh ./d8 bench.js
-
Examples:
# Print flat profile with annotated disassembly for the 10 top
# symbols. Use default log names and include the snapshot log.
@@ -85,10 +75,6 @@ class Code(object):
"""Code object."""
_id = 0
- UNKNOWN = 0
- V8INTERNAL = 1
- FULL_CODEGEN = 2
- OPTIMIZED = 3
def __init__(self, name, start_address, end_address, origin, origin_offset):
self.id = Code._id
@@ -102,14 +88,6 @@ class Code(object):
self.self_ticks = 0
self.self_ticks_map = None
self.callee_ticks = None
- if name.startswith("LazyCompile:*"):
- self.codetype = Code.OPTIMIZED
- elif name.startswith("LazyCompile:"):
- self.codetype = Code.FULL_CODEGEN
- elif name.startswith("v8::internal::"):
- self.codetype = Code.V8INTERNAL
- else:
- self.codetype = Code.UNKNOWN
def AddName(self, name):
assert self.name != name
@@ -207,7 +185,7 @@ class Code(object):
class CodePage(object):
"""Group of adjacent code objects."""
- SHIFT = 20 # 1M pages
+ SHIFT = 12 # 4K pages
SIZE = (1 << SHIFT)
MASK = ~(SIZE - 1)
@@ -529,7 +507,6 @@ class Descriptor(object):
# for the gory details.
-# Reference: struct perf_file_header in kernel/tools/perf/util/header.h
TRACE_HEADER_DESC = Descriptor([
("magic", "u64"),
("size", "u64"),
@@ -543,7 +520,6 @@ TRACE_HEADER_DESC = Descriptor([
])
-# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_ATTR_DESC = Descriptor([
("type", "u32"),
("size", "u32"),
@@ -553,13 +529,12 @@ PERF_EVENT_ATTR_DESC = Descriptor([
("read_format", "u64"),
("flags", "u64"),
("wakeup_events_or_watermark", "u32"),
- ("bp_type", "u32"),
+ ("bt_type", "u32"),
("bp_addr", "u64"),
- ("bp_len", "u64")
+ ("bp_len", "u64"),
])
-# Reference: /usr/include/linux/perf_event.h
PERF_EVENT_HEADER_DESC = Descriptor([
("type", "u32"),
("misc", "u16"),
@@ -567,7 +542,6 @@ PERF_EVENT_HEADER_DESC = Descriptor([
])
-# Reference: kernel/events/core.c
PERF_MMAP_EVENT_BODY_DESC = Descriptor([
("pid", "u32"),
("tid", "u32"),
@@ -592,7 +566,6 @@ PERF_SAMPLE_STREAM_ID = 1 << 9
PERF_SAMPLE_RAW = 1 << 10
-# Reference: /usr/include/perf_event.h, the comment for PERF_RECORD_SAMPLE.
PERF_SAMPLE_EVENT_BODY_FIELDS = [
("ip", "u64", PERF_SAMPLE_IP),
("pid", "u32", PERF_SAMPLE_TID),
@@ -729,12 +702,8 @@ class LibraryRepo(object):
# Unfortunately, section headers span two lines, so we have to
# keep the just seen section name (from the first line in each
# section header) in the after_section variable.
- if mmap_info.filename.endswith(".ko"):
- dynamic_symbols = ""
- else:
- dynamic_symbols = "-T"
process = subprocess.Popen(
- "%s -h -t %s -C %s" % (OBJDUMP_BIN, dynamic_symbols, mmap_info.filename),
+ "%s -h -t -T -C %s" % (OBJDUMP_BIN, mmap_info.filename),
shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
pipe = process.stdout
after_section = None
@@ -826,7 +795,7 @@ def PrintReport(code_map, library_repo, arch, ticks, options):
code.PrintAnnotated(arch, options)
print
print "Ticks per library:"
- mmap_infos = [m for m in library_repo.infos if m.ticks > 0]
+ mmap_infos = [m for m in library_repo.infos]
mmap_infos.sort(key=lambda m: m.ticks, reverse=True)
for mmap_info in mmap_infos:
mmap_ticks = mmap_info.ticks
@@ -916,9 +885,6 @@ if __name__ == "__main__":
ticks = 0
missed_ticks = 0
really_missed_ticks = 0
- optimized_ticks = 0
- generated_ticks = 0
- v8_internal_ticks = 0
mmap_time = 0
sample_time = 0
@@ -962,12 +928,6 @@ if __name__ == "__main__":
code = code_map.Find(sample.ip)
if code:
code.Tick(sample.ip)
- if code.codetype == Code.OPTIMIZED:
- optimized_ticks += 1
- elif code.codetype == Code.FULL_CODEGEN:
- generated_ticks += 1
- elif code.codetype == Code.V8INTERNAL:
- v8_internal_ticks += 1
else:
missed_ticks += 1
if not library_repo.Tick(sample.ip) and not code:
@@ -987,21 +947,12 @@ if __name__ == "__main__":
PrintReport(code_map, library_repo, log_reader.arch, ticks, options)
if not options.quiet:
- def PrintTicks(number, total, description):
- print("%10d %5.1f%% ticks in %s" %
- (number, 100.0*number/total, description))
print
print "Stats:"
print "%10d total trace events" % events
print "%10d total ticks" % ticks
print "%10d ticks not in symbols" % missed_ticks
- unaccounted = "unaccounted ticks"
- if really_missed_ticks > 0:
- unaccounted += " (probably in the kernel, try --kernel)"
- PrintTicks(really_missed_ticks, ticks, unaccounted)
- PrintTicks(optimized_ticks, ticks, "ticks in optimized code")
- PrintTicks(generated_ticks, ticks, "ticks in other lazily compiled code")
- PrintTicks(v8_internal_ticks, ticks, "ticks in v8::internal::*")
+ print "%10d unaccounted ticks" % really_missed_ticks
print "%10d total symbols" % len([c for c in code_map.AllCode()])
print "%10d used symbols" % len([c for c in code_map.UsedCode()])
print "%9.2fs library processing time" % mmap_time
diff --git a/deps/v8/tools/plot-timer-events b/deps/v8/tools/plot-timer-events
deleted file mode 100755
index 581e0ae33..000000000
--- a/deps/v8/tools/plot-timer-events
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-
-# find the name of the log file to process, it must not start with a dash.
-log_file="v8.log"
-for arg in "$@"
-do
- if ! expr "X${arg}" : "^X-" > /dev/null; then
- log_file=${arg}
- fi
-done
-
-tools_path=`cd $(dirname "$0");pwd`
-if [ ! "$D8_PATH" ]; then
- d8_public=`which d8`
- if [ -x "$d8_public" ]; then D8_PATH=$(dirname "$d8_public"); fi
-fi
-[ -n "$D8_PATH" ] || D8_PATH=$tools_path/..
-d8_exec=$D8_PATH/d8
-
-if [ ! -x "$d8_exec" ]; then
- D8_PATH=`pwd`/out/native
- d8_exec=$D8_PATH/d8
-fi
-
-if [ ! -x "$d8_exec" ]; then
- d8_exec=`grep -m 1 -o '".*/d8"' $log_file | sed 's/"//g'`
-fi
-
-if [ ! -x "$d8_exec" ]; then
- echo "d8 shell not found in $D8_PATH"
- echo "To build, execute 'make native' from the V8 directory"
- exit 1
-fi
-
-if [ -n "$DISTORTION" ]; then
- distortion=$DISTORTION
-else
- # Try to find out how much the instrumentation overhead is.
- calibration_log=calibration.log
- calibration_script="for (var i = 0; i < 1000000; i++) print();"
-
- $d8_exec --nocrankshaft --prof --logfile $calibration_log \
- --log-timer-events -e "$calibration_script" > /dev/null
- t_1=`grep "V8.Execute" $calibration_log | tail -n1 | awk -F, '{print $4}'`
- n_1=`grep "timer-event" $calibration_log | wc -l`
-
- $d8_exec --nocrankshaft --prof --logfile $calibration_log \
- --log-internal-timer-events -e "$calibration_script" > /dev/null
- t_2=`grep "V8.Execute" $calibration_log | tail -n1 | awk -F, '{print $4}'`
- n_2=`grep "timer-event" $calibration_log | wc -l`
-
- rm $calibration_log
-
- # Overhead in picoseconds.
- distortion=`echo "1000*($t_1 - $t_2)/($n_1 - $n_2)" | bc`
-fi
-
-if [ -n "$PLOT_RANGE" ]; then
- plot_range=$PLOT_RANGE
-else
- plot_range=auto,auto
-fi
-
-echo "DISTORTION=$distortion"
-echo "PLOT_RANGE=$plot_range"
-
-echo -e "plot-range,$plot_range\ndistortion,$distortion" | cat - $log_file |
- $d8_exec $tools_path/csvparser.js \
- $tools_path/splaytree.js $tools_path/codemap.js \
- $tools_path/logreader.js $tools_path/plot-timer-events.js \
- 2>/dev/null | gnuplot > timer-events.png
diff --git a/deps/v8/tools/plot-timer-events.js b/deps/v8/tools/plot-timer-events.js
deleted file mode 100644
index 4b17e7674..000000000
--- a/deps/v8/tools/plot-timer-events.js
+++ /dev/null
@@ -1,576 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following
-// disclaimer in the documentation and/or other materials provided
-// with the distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived
-// from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-var kV8BinarySuffixes = ["/d8", "/libv8.so"];
-var kStackFrames = 8;
-
-var kTimerEventWidth = 0.33;
-var kExecutionFrameWidth = 0.2;
-var kStackFrameWidth = 0.1;
-var kGapWidth = 0.05;
-
-var kPauseTolerance = 0.1; // Milliseconds.
-var kY1Offset = 10;
-
-var kResX = 1600;
-var kResY = 600;
-var kPauseLabelPadding = 5;
-var kNumPauseLabels = 7;
-var kTickHalfDuration = 0.5; // Milliseconds
-var kCodeKindLabelPadding = 100;
-
-var num_timer_event = kY1Offset + 0.5;
-
-
-function TimerEvent(color, pause, no_execution) {
- this.color = color;
- this.pause = pause;
- this.ranges = [];
- this.no_execution = no_execution;
- this.index = ++num_timer_event;
-}
-
-
-var TimerEvents = {
- 'V8.Execute': new TimerEvent("#000000", false, false),
- 'V8.External': new TimerEvent("#3399FF", false, true),
- 'V8.CompileFullCode': new TimerEvent("#CC0000", true, true),
- 'V8.RecompileSynchronous': new TimerEvent("#CC0044", true, true),
- 'V8.RecompileParallel': new TimerEvent("#CC4499", false, false),
- 'V8.CompileEval': new TimerEvent("#CC4400", true, true),
- 'V8.Parse': new TimerEvent("#00CC00", true, true),
- 'V8.PreParse': new TimerEvent("#44CC00", true, true),
- 'V8.ParseLazy': new TimerEvent("#00CC44", true, true),
- 'V8.GCScavenger': new TimerEvent("#0044CC", true, true),
- 'V8.GCCompactor': new TimerEvent("#4444CC", true, true),
- 'V8.GCContext': new TimerEvent("#4400CC", true, true),
-}
-
-var kExecutionEvent = TimerEvents['V8.Execute'];
-
-
-function CodeKind(color, kinds) {
- this.color = color;
- this.in_execution = [];
- this.stack_frames = [];
- for (var i = 0; i < kStackFrames; i++) this.stack_frames.push([]);
- this.kinds = kinds;
-}
-
-
-var CodeKinds = {
- 'external ': new CodeKind("#3399FF", [-3]),
- 'reg.exp. ': new CodeKind("#0000FF", [-2]),
- 'runtime ': new CodeKind("#000000", [-1]),
- 'full code': new CodeKind("#DD0000", [0]),
- 'opt code ': new CodeKind("#00EE00", [1]),
- 'code stub': new CodeKind("#FF00FF", [2]),
- 'built-in ': new CodeKind("#AA00AA", [3]),
- 'inl.cache': new CodeKind("#4444AA", [4, 5, 6, 7, 8, 9, 10, 11, 12, 13]),
-}
-
-
-var xrange_start = Infinity;
-var xrange_end = 0;
-var obj_index = 0;
-var execution_pauses = [];
-var code_map = new CodeMap();
-
-var xrange_start_override = undefined;
-var xrange_end_override = undefined;
-var distortion_per_entry = 0.005; // Milliseconds
-
-var sort_by_start = [];
-var sort_by_end = [];
-var sorted_ticks = [];
-
-
-function Range(start, end) {
- // Everthing from here are in milliseconds.
- this.start = start;
- this.end = end;
-}
-
-
-function Tick(tick) {
- this.tick = tick;
-}
-
-
-Range.prototype.duration = function() { return this.end - this.start; }
-
-
-function ProcessTimerEvent(name, start, length) {
- var event = TimerEvents[name];
- if (event === undefined) return;
- start /= 1000; // Convert to milliseconds.
- length /= 1000;
- var end = start + length;
- var range = new Range(start, end);
- event.ranges.push(range);
- sort_by_start.push(range);
- sort_by_end.push(range);
-}
-
-
-function ProcessCodeCreateEvent(type, kind, address, size, name) {
- var code_entry = new CodeMap.CodeEntry(size, name);
- code_entry.kind = kind;
- code_map.addCode(address, code_entry);
-}
-
-
-function ProcessCodeMoveEvent(from, to) {
- code_map.moveCode(from, to);
-}
-
-
-function ProcessCodeDeleteEvent(address) {
- code_map.deleteCode(address);
-}
-
-
-function ProcessSharedLibrary(name, start, end) {
- var code_entry = new CodeMap.CodeEntry(end - start, name);
- code_entry.kind = -3; // External code kind.
- for (var i = 0; i < kV8BinarySuffixes.length; i++) {
- var suffix = kV8BinarySuffixes[i];
- if (name.indexOf(suffix, name.length - suffix.length) >= 0) {
- code_entry.kind = -1; // V8 runtime code kind.
- break;
- }
- }
- code_map.addLibrary(start, code_entry);
-}
-
-
-function FindCodeKind(kind) {
- for (name in CodeKinds) {
- if (CodeKinds[name].kinds.indexOf(kind) >= 0) {
- return CodeKinds[name];
- }
- }
-}
-
-
-function ProcessTickEvent(pc, sp, timer, unused_x, unused_y, vmstate, stack) {
- timer /= 1000;
- var tick = new Tick(timer);
-
- var entered = false;
- var entry = code_map.findEntry(pc);
- if (entry) {
- FindCodeKind(entry.kind).in_execution.push(tick);
- entered = true;
- }
-
- for (var i = 0; i < kStackFrames; i++) {
- if (!stack[i]) break;
- var entry = code_map.findEntry(stack[i]);
- if (entry) {
- FindCodeKind(entry.kind).stack_frames[i].push(tick);
- entered = true;
- }
- }
-
- if (entered) sorted_ticks.push(tick);
-}
-
-
-function ProcessDistortion(distortion_in_picoseconds) {
- distortion_per_entry = distortion_in_picoseconds / 1000000;
-}
-
-
-function ProcessPlotRange(start, end) {
- xrange_start_override = start;
- xrange_end_override = end;
-}
-
-
-function Undistort() {
- // Undistort timers wrt instrumentation overhead.
- sort_by_start.sort(function(a, b) { return b.start - a.start; });
- sort_by_end.sort(function(a, b) { return b.end - a.end; });
- sorted_ticks.sort(function(a, b) { return b.tick - a.tick; });
- var distortion = 0;
-
- var next_start = sort_by_start.pop();
- var next_end = sort_by_end.pop();
- var next_tick = sorted_ticks.pop();
-
- function UndistortTicksUntil(tick) {
- while (next_tick) {
- if (next_tick.tick > tick) return;
- next_tick.tick -= distortion;
- next_tick = sorted_ticks.pop();
- }
- }
-
- while (true) {
- var next_start_start = next_start ? next_start.start : Infinity;
- var next_end_end = next_end ? next_end.end : Infinity;
- if (!next_start && !next_end) {
- UndistortTicksUntil(Infinity);
- break;
- }
- if (next_start_start <= next_end_end) {
- UndistortTicksUntil(next_start_start);
- // Undistort the start time stamp.
- next_start.start -= distortion;
- next_start = sort_by_start.pop();
- } else {
- // Undistort the end time stamp. We completely attribute the overhead
- // to the point when we stop and log the timer, so we increase the
- // distortion only here.
- UndistortTicksUntil(next_end_end);
- next_end.end -= distortion;
- distortion += distortion_per_entry;
- next_end = sort_by_end.pop();
- }
- }
-
- sort_by_start = undefined;
- sort_by_end = undefined;
- sorted_ticks = undefined;
-
- // Make sure that start <= end applies for every range.
- for (name in TimerEvents) {
- var ranges = TimerEvents[name].ranges;
- for (var j = 0; j < ranges.length; j++) {
- if (ranges[j].end < ranges[j].start) ranges[j].end = ranges[j].start;
- }
- }
-}
-
-
-function CollectData() {
- // Collect data from log.
- var logreader = new LogReader(
- { 'timer-event' : { parsers: [null, parseInt, parseInt],
- processor: ProcessTimerEvent },
- 'shared-library': { parsers: [null, parseInt, parseInt],
- processor: ProcessSharedLibrary },
- 'code-creation': { parsers: [null, parseInt, parseInt, parseInt, null],
- processor: ProcessCodeCreateEvent },
- 'code-move': { parsers: [parseInt, parseInt],
- processor: ProcessCodeMoveEvent },
- 'code-delete': { parsers: [parseInt],
- processor: ProcessCodeDeleteEvent },
- 'tick': { parsers: [parseInt, parseInt, parseInt,
- null, null, parseInt, 'var-args'],
- processor: ProcessTickEvent },
- 'distortion': { parsers: [parseInt],
- processor: ProcessDistortion },
- 'plot-range': { parsers: [parseInt, parseInt],
- processor: ProcessPlotRange },
- });
-
- var line;
- while (line = readline()) {
- logreader.processLogLine(line);
- }
-
- Undistort();
-
- // Figure out plot range.
- var execution_ranges = kExecutionEvent.ranges;
- for (var i = 0; i < execution_ranges.length; i++) {
- if (execution_ranges[i].start < xrange_start) {
- xrange_start = execution_ranges[i].start;
- }
- if (execution_ranges[i].end > xrange_end) {
- xrange_end = execution_ranges[i].end;
- }
- }
-
- // Collect execution pauses.
- for (name in TimerEvents) {
- var event = TimerEvents[name];
- if (!event.pause) continue;
- var ranges = event.ranges;
- for (var j = 0; j < ranges.length; j++) execution_pauses.push(ranges[j]);
- }
- execution_pauses = MergeRanges(execution_pauses);
-
- // Knock out time not spent in javascript execution. Note that this also
- // includes time spent external code, which do not contribute to execution
- // pauses.
- var exclude_ranges = [];
- for (name in TimerEvents) {
- var event = TimerEvents[name];
- if (!event.no_execution) continue;
- var ranges = event.ranges;
- // Add ranges of this event to the pause list.
- for (var j = 0; j < ranges.length; j++) {
- exclude_ranges.push(ranges[j]);
- }
- }
-
- kExecutionEvent.ranges = MergeRanges(kExecutionEvent.ranges);
- exclude_ranges = MergeRanges(exclude_ranges);
- kExecutionEvent.ranges = ExcludeRanges(kExecutionEvent.ranges,
- exclude_ranges);
-}
-
-
-function DrawBar(row, color, start, end, width) {
- obj_index++;
- command = "set object " + obj_index + " rect";
- command += " from " + start + ", " + (row - width);
- command += " to " + end + ", " + (row + width);
- command += " fc rgb \"" + color + "\"";
- print(command);
-}
-
-
-function TicksToRanges(ticks) {
- var ranges = [];
- for (var i = 0; i < ticks.length; i++) {
- var tick = ticks[i].tick;
- ranges.push(new Range(tick - kTickHalfDuration, tick + kTickHalfDuration));
- }
- return ranges;
-}
-
-
-function MergeRanges(ranges) {
- ranges.sort(function(a, b) { return a.start - b.start; });
- var result = [];
- var j = 0;
- for (var i = 0; i < ranges.length; i = j) {
- var merge_start = ranges[i].start;
- if (merge_start > xrange_end) break; // Out of plot range.
- var merge_end = ranges[i].end;
- for (j = i + 1; j < ranges.length; j++) {
- var next_range = ranges[j];
- // Don't merge ranges if there is no overlap (including merge tolerance).
- if (next_range.start > merge_end + kPauseTolerance) break;
- // Merge ranges.
- if (next_range.end > merge_end) { // Extend range end.
- merge_end = next_range.end;
- }
- }
- if (merge_end < xrange_start) continue; // Out of plot range.
- if (merge_end < merge_start) continue; // Not an actual range.
- result.push(new Range(merge_start, merge_end));
- }
- return result;
-}
-
-
-function ExcludeRanges(include, exclude) {
- // We assume that both input lists are sorted and merged with MergeRanges.
- var result = [];
- var exclude_index = 0;
- var include_index = 0;
- var include_start, include_end, exclude_start, exclude_end;
-
- function NextInclude() {
- if (include_index >= include.length) return false;
- include_start = include[include_index].start;
- include_end = include[include_index].end;
- include_index++;
- return true;
- }
-
- function NextExclude() {
- if (exclude_index >= exclude.length) {
- // No more exclude, finish by repeating case (2).
- exclude_start = Infinity;
- exclude_end = Infinity;
- return false;
- }
- exclude_start = exclude[exclude_index].start;
- exclude_end = exclude[exclude_index].end;
- exclude_index++;
- return true;
- }
-
- if (!NextInclude() || !NextExclude()) return include;
-
- while (true) {
- if (exclude_end <= include_start) {
- // (1) Exclude and include do not overlap.
- // Include #####
- // Exclude ##
- NextExclude();
- } else if (include_end <= exclude_start) {
- // (2) Exclude and include do not overlap.
- // Include #####
- // Exclude ###
- result.push(new Range(include_start, include_end));
- if (!NextInclude()) break;
- } else if (exclude_start <= include_start &&
- exclude_end < include_end &&
- include_start < exclude_end) {
- // (3) Exclude overlaps with begin of include.
- // Include #######
- // Exclude #####
- // Result ####
- include_start = exclude_end;
- NextExclude();
- } else if (include_start < exclude_start &&
- include_end <= exclude_end &&
- exclude_start < include_end) {
- // (4) Exclude overlaps with end of include.
- // Include #######
- // Exclude #####
- // Result ####
- result.push(new Range(include_start, exclude_start));
- if (!NextInclude()) break;
- } else if (exclude_start > include_start && exclude_end < include_end) {
- // (5) Exclude splits include into two parts.
- // Include #######
- // Exclude ##
- // Result ## ###
- result.push(new Range(include_start, exclude_start));
- include_start = exclude_end;
- NextExclude();
- } else if (exclude_start <= include_start && exclude_end >= include_end) {
- // (6) Exclude entirely covers include.
- // Include ######
- // Exclude #########
- if (!NextInclude()) break;
- } else {
- throw new Error("this should not happen!");
- }
- }
-
- return result;
-}
-
-
-function GnuplotOutput() {
- xrange_start = (xrange_start_override || xrange_start_override == 0)
- ? xrange_start_override : xrange_start;
- xrange_end = (xrange_end_override || xrange_end_override == 0)
- ? xrange_end_override : xrange_end;
- print("set terminal pngcairo size " + kResX + "," + kResY +
- " enhanced font 'Helvetica,10'");
- print("set yrange [0:" + (num_timer_event + 1) + "]");
- print("set xlabel \"execution time in ms\"");
- print("set xrange [" + xrange_start + ":" + xrange_end + "]");
- print("set style fill pattern 2 bo 1");
- print("set style rect fs solid 1 noborder");
- print("set style line 1 lt 1 lw 1 lc rgb \"#000000\"");
- print("set xtics out nomirror");
- print("unset key");
-
- // Name Y-axis.
- var ytics = [];
- for (name in TimerEvents) {
- var index = TimerEvents[name].index;
- ytics.push('"' + name + '"' + ' ' + index);
- }
- ytics.push('"code kind being executed"' + ' ' + (kY1Offset - 1));
- ytics.push('"top ' + kStackFrames + ' js stack frames"' + ' ' +
- (kY1Offset - 2));
- ytics.push('"pause times" 0');
- print("set ytics out nomirror (" + ytics.join(', ') + ")");
-
- // Plot timeline.
- for (var name in TimerEvents) {
- var event = TimerEvents[name];
- var ranges = MergeRanges(event.ranges);
- for (var i = 0; i < ranges.length; i++) {
- DrawBar(event.index, event.color,
- ranges[i].start, ranges[i].end,
- kTimerEventWidth);
- }
- }
-
- // Plot code kind gathered from ticks.
- for (var name in CodeKinds) {
- var code_kind = CodeKinds[name];
- var offset = kY1Offset - 1;
- // Top most frame.
- var row = MergeRanges(TicksToRanges(code_kind.in_execution));
- for (var j = 0; j < row.length; j++) {
- DrawBar(offset, code_kind.color,
- row[j].start, row[j].end, kExecutionFrameWidth);
- }
- offset = offset - 2 * kExecutionFrameWidth - kGapWidth;
- // Javascript frames.
- for (var i = 0; i < kStackFrames; i++) {
- offset = offset - 2 * kStackFrameWidth - kGapWidth;
- row = MergeRanges(TicksToRanges(code_kind.stack_frames[i]));
- for (var j = 0; j < row.length; j++) {
- DrawBar(offset, code_kind.color,
- row[j].start, row[j].end, kStackFrameWidth);
- }
- }
- }
-
- // Add labels as legend for code kind colors.
- var padding = kCodeKindLabelPadding * (xrange_end - xrange_start) / kResX;
- var label_x = xrange_start;
- var label_y = kY1Offset;
- for (var name in CodeKinds) {
- label_x += padding;
- print("set label \"" + name + "\" at " + label_x + "," + label_y +
- " textcolor rgb \"" + CodeKinds[name].color + "\"" +
- " font \"Helvetica,9'\"");
- }
-
- if (execution_pauses.length == 0) {
- // Force plot and return without plotting execution pause impulses.
- print("plot 1/0");
- return;
- }
-
- // Label the longest pauses.
- execution_pauses.sort(
- function(a, b) { return b.duration() - a.duration(); });
-
- var max_pause_time = execution_pauses[0].duration();
- padding = kPauseLabelPadding * (xrange_end - xrange_start) / kResX;
- var y_scale = kY1Offset / max_pause_time / 2;
- for (var i = 0; i < execution_pauses.length && i < kNumPauseLabels; i++) {
- var pause = execution_pauses[i];
- var label_content = (pause.duration() | 0) + " ms";
- var label_x = pause.end + padding;
- var label_y = Math.max(1, (pause.duration() * y_scale));
- print("set label \"" + label_content + "\" at " +
- label_x + "," + label_y + " font \"Helvetica,7'\"");
- }
-
- // Scale second Y-axis appropriately.
- var y2range = max_pause_time * num_timer_event / kY1Offset * 2;
- print("set y2range [0:" + y2range + "]");
- // Plot graph with impulses as data set.
- print("plot '-' using 1:2 axes x1y2 with impulses ls 1");
- for (var i = 0; i < execution_pauses.length; i++) {
- var pause = execution_pauses[i];
- print(pause.end + " " + pause.duration());
- }
- print("e");
-}
-
-
-CollectData();
-GnuplotOutput();
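
The deleted plot-timer-events.js is the largest single removal in the tools directory. Its heart is ordinary interval arithmetic: MergeRanges coalesces ranges whose gaps fall within kPauseTolerance, ExcludeRanges subtracts one merged list from another via the six cases annotated above, and Undistort walks three sorted streams to strip accumulated instrumentation overhead. A compact Python rendering of the merge step, same algorithm minus the plot-range clipping:

    def merge_ranges(ranges, tolerance=0.1):
        # Sort by start, then greedily extend the current range's end while
        # the next range starts within `tolerance` ms of it.
        result = []
        for start, end in sorted(ranges):
            if result and start <= result[-1][1] + tolerance:
                result[-1][1] = max(result[-1][1], end)
            else:
                result.append([start, end])
        return [tuple(r) for r in result]

    print(merge_ranges([(0.0, 1.0), (1.05, 2.0), (5.0, 6.0)]))
    # [(0.0, 2.0), (5.0, 6.0)]
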
diff --git a/deps/v8/tools/run-llprof.sh b/deps/v8/tools/run-llprof.sh
deleted file mode 100755
index d526170d1..000000000
--- a/deps/v8/tools/run-llprof.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-#
-# Copyright 2012 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-# * Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# * Redistributions in binary form must reproduce the above
-# copyright notice, this list of conditions and the following
-# disclaimer in the documentation and/or other materials provided
-# with the distribution.
-# * Neither the name of Google Inc. nor the names of its
-# contributors may be used to endorse or promote products derived
-# from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-########## Global variable definitions
-
-# Ensure that <your CPU clock> / $SAMPLE_EVERY_N_CYCLES < $MAXIMUM_SAMPLE_RATE.
-MAXIMUM_SAMPLE_RATE=10000000
-SAMPLE_EVERY_N_CYCLES=10000
-SAMPLE_RATE_CONFIG_FILE="/proc/sys/kernel/perf_event_max_sample_rate"
-KERNEL_MAP_CONFIG_FILE="/proc/sys/kernel/kptr_restrict"
-
-########## Usage
-
-usage() {
-cat << EOF
-usage: $0 <benchmark_command>
-
-Executes <benchmark_command> under observation by the kernel's "perf" \
-framework, then calls the low level tick processor to analyze the results.
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "-h" ] || [ "$1" == "--help" ] ; then
- usage
- exit 1
-fi
-
-########## Actual script execution
-
-ACTUAL_SAMPLE_RATE=$(cat $SAMPLE_RATE_CONFIG_FILE)
-if [ "$ACTUAL_SAMPLE_RATE" -lt "$MAXIMUM_SAMPLE_RATE" ] ; then
- echo "Setting appropriate maximum sample rate..."
- echo $MAXIMUM_SAMPLE_RATE | sudo tee $SAMPLE_RATE_CONFIG_FILE
-fi
-
-ACTUAL_KERNEL_MAP_RESTRICTION=$(cat $KERNEL_MAP_CONFIG_FILE)
-if [ "$ACTUAL_KERNEL_MAP_RESTRICTION" -ne "0" ] ; then
- echo "Disabling kernel address map restriction..."
- echo 0 | sudo tee $KERNEL_MAP_CONFIG_FILE
-fi
-
-echo "Running benchmark..."
-perf record -R -e cycles -c $SAMPLE_EVERY_N_CYCLES -f -i $@ --ll-prof
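
run-llprof.sh leaves together with the ll_prof.py help text that advertised it; its whole job was the preflight above: raise the kernel's perf sample-rate cap so "-c 10000" is usable, and zero kptr_restrict so kernel addresses resolve. The same check, sketched in Python against the Linux /proc paths the script used (writing them still requires root):

    SAMPLE_RATE_FILE = '/proc/sys/kernel/perf_event_max_sample_rate'
    KPTR_FILE = '/proc/sys/kernel/kptr_restrict'

    def preflight(minimum_rate=10000000):
        with open(SAMPLE_RATE_FILE) as f:
            rate = int(f.read())
        if rate < minimum_rate:
            print('sample rate cap is %d; raise it to %d' % (rate, minimum_rate))
        with open(KPTR_FILE) as f:
            if int(f.read()) != 0:
                print('kptr_restrict is set; kernel address maps will be hidden')

    preflight()
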
diff --git a/deps/v8/tools/run-tests.py b/deps/v8/tools/run-tests.py
index f20af169d..a49f6560a 100755
--- a/deps/v8/tools/run-tests.py
+++ b/deps/v8/tools/run-tests.py
@@ -66,11 +66,6 @@ SUPPORTED_ARCHS = ["android_arm",
"ia32",
"mipsel",
"x64"]
-# Double the timeout for these:
-SLOW_ARCHS = ["android_arm",
- "android_ia32",
- "arm",
- "mipsel"]
def BuildOptions():
@@ -155,7 +150,7 @@ def ProcessOptions(options):
options.mode = tokens[1]
options.mode = options.mode.split(",")
for mode in options.mode:
- if not mode.lower() in ["debug", "release"]:
+ if not mode in ["debug", "release"]:
print "Unknown mode %s" % mode
return False
if options.arch in ["auto", "native"]:
@@ -273,12 +268,12 @@ def Execute(arch, mode, args, options, suites, workspace):
timeout = options.timeout
if timeout == -1:
# Simulators are slow, therefore allow a longer default timeout.
- if arch in SLOW_ARCHS:
+ if arch in ["android", "arm", "mipsel"]:
timeout = 2 * TIMEOUT_DEFAULT;
else:
timeout = TIMEOUT_DEFAULT;
- timeout *= TIMEOUT_SCALEFACTOR[mode]
+ options.timeout *= TIMEOUT_SCALEFACTOR[mode]
ctx = context.Context(arch, mode, shell_dir,
mode_flags, options.verbose,
timeout, options.isolates,
@@ -298,9 +293,9 @@ def Execute(arch, mode, args, options, suites, workspace):
for s in suites:
s.ReadStatusFile(variables)
s.ReadTestCases(ctx)
+ all_tests += s.tests
if len(args) > 0:
s.FilterTestCasesByArgs(args)
- all_tests += s.tests
s.FilterTestCasesByStatus(options.warn_unused)
if options.cat:
verbose.PrintTestSource(s.tests)
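
Two regressions ride along with this hunk, both fixed upstream after 3.14.5. First, the timeout scaling reverts to mutating options.timeout instead of the local timeout, so the computed default is never actually scaled and the -1 sentinel itself gets multiplied, leaking into any later arch/mode pass. Second, all_tests is accumulated before FilterTestCasesByArgs runs, so the reported total counts tests that a command-line filter later discards. A toy run of the first problem, with illustrative scale factors:

    TIMEOUT_DEFAULT = 60
    TIMEOUT_SCALEFACTOR = {'debug': 4, 'release': 1}  # illustrative values

    class Options(object):
        timeout = -1  # -1 means "pick a default"

    options = Options()
    for mode in ['debug', 'release']:
        timeout = options.timeout
        if timeout == -1:
            timeout = 2 * TIMEOUT_DEFAULT
        options.timeout *= TIMEOUT_SCALEFACTOR[mode]  # the rolled-back line
        print(mode, timeout, options.timeout)
    # debug pass: runs with 120, never scaled; options.timeout becomes -4,
    # so the release pass no longer recognizes the sentinel at all.
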
diff --git a/deps/v8/tools/tick-processor.html b/deps/v8/tools/tick-processor.html
deleted file mode 100644
index bc9f636cb..000000000
--- a/deps/v8/tools/tick-processor.html
+++ /dev/null
@@ -1,168 +0,0 @@
-<!DOCTYPE html>
-<!-- Copyright 2012 the V8 project authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
- * Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
- * Neither the name of Google Inc. nor the names of its
- contributors may be used to endorse or promote products derived
- from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -->
-
-<html lang="en">
-<head>
- <meta charset="utf-8"/>
- <title>V8 Tick Processor</title>
-
- <style type="text/css">
- body {
- font-family: Verdana, Arial, Helvetica, sans-serif;
- font-size: 10pt;
- }
- h4 {
- margin-bottom: 0px;
- }
- p {
- margin-top: 0px;
- }
- </style>
-
- <script src="splaytree.js"></script>
- <script src="codemap.js"></script>
- <script src="csvparser.js"></script>
- <script src="consarray.js"></script>
- <script src="profile.js"></script>
- <script src="profile_view.js"></script>
- <script src="logreader.js"></script>
- <script src="tickprocessor.js"></script>
-
- <script type="text/javascript">
-
-var v8log_content;
-var textout;
-
-function load_logfile(evt) {
- textout.value = "";
- var f = evt.target.files[0];
- if (f) {
- var reader = new FileReader();
- reader.onload = function(event) {
- v8log_content = event.target.result;
- start_process();
- };
- reader.onerror = function(event) {
- console.error("File could not be read! Code " + event.target.error.code);
- };
- reader.readAsText(f);
- } else {
- alert("Failed to load file");
- }
-}
-
-function print(arg) {
- textout.value+=arg+"\n";
-}
-
-function start_process() {
- ArgumentsProcessor.DEFAULTS = {
- logFileName: 'v8.log',
- snapshotLogFileName: null,
- platform: 'unix',
- stateFilter: null,
- callGraphSize: 5,
- ignoreUnknown: false,
- separateIc: false,
- targetRootFS: '',
- nm: 'nm'
- };
-
- var entriesProviders = {
- 'unix': UnixCppEntriesProvider,
- 'windows': WindowsCppEntriesProvider,
- 'mac': MacCppEntriesProvider
- };
-
- var snapshotLogProcessor; // not used
-
- var tickProcessor = new TickProcessor(
- new (entriesProviders[ArgumentsProcessor.DEFAULTS.platform])(
- ArgumentsProcessor.DEFAULTS.nm,
- ArgumentsProcessor.DEFAULTS.targetRootFS),
- ArgumentsProcessor.DEFAULTS.separateIc,
- ArgumentsProcessor.DEFAULTS.callGraphSize,
- ArgumentsProcessor.DEFAULTS.ignoreUnknown,
- ArgumentsProcessor.DEFAULTS.stateFilter,
- snapshotLogProcessor);
-
- tickProcessor.processLogChunk(v8log_content);
- tickProcessor.printStatistics();
-}
-
-function Load() {
- document.getElementById('fileinput').addEventListener(
- 'change', load_logfile, false);
- textout = document.getElementById('textout');
-}
-</script>
-</head>
-<body onLoad="Load()">
-
-<h3 style="margin-top: 2px;">
- Chrome V8 profiling log processor
-</h3>
-<p>
-Process V8's profiling information log (sampling profiler tick information)
-in your browser. Particularly useful if you don't have the V8 shell (d8)
-at hand on your system. You still have to run Chrome with the appropriate
-<a href="https://code.google.com/p/v8/wiki/ProfilingChromiumWithV8">
- command line flags</a>
-to produce the profiling log.
-</p>
-<h4>Usage:</h4>
-<p>
-Click on the button and browse to the profiling log file (usually, v8.log).
-Process will start automatically and the output will be visible in the below
-text area.
-</p>
-<h4>Limitations and disclaimer:</h4>
-<p>
-This page offers a subset of the functionalities of the command-line tick
-processor utility in the V8 repository. In particular, this page cannot
-access the command-line utility that provides library symbol information,
-hence the [C++] section of the output stays empty. Also consider that this
-web-based tool is provided only for convenience and quick reference, you
-should refer to the
-<a href="https://code.google.com/p/v8/wiki/V8Profiler">
- command-line</a>
-version for full output.
-</p>
-<p>
-<input type="file" id="fileinput" />
-</p>
-<p>
-<textarea name="myTextArea" cols="120" rows="40" wrap="off" id="textout"
- readonly="yes"></textarea>
-</p>
-<p style="font-style:italic;">
-Copyright the V8 Authors - Last change to this page: 12/12/2012
-</p>
-
-
-</body>
-</html>
diff --git a/deps/v8/tools/tickprocessor.js b/deps/v8/tools/tickprocessor.js
index 7530c6b37..4c4886d87 100644
--- a/deps/v8/tools/tickprocessor.js
+++ b/deps/v8/tools/tickprocessor.js
@@ -73,7 +73,7 @@ function parseState(s) {
function SnapshotLogProcessor() {
LogReader.call(this, {
'code-creation': {
- parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+ parsers: [null, parseInt, parseInt, null, 'var-args'],
processor: this.processCodeCreation },
'code-move': { parsers: [parseInt, parseInt],
processor: this.processCodeMove },
@@ -107,7 +107,7 @@ inherits(SnapshotLogProcessor, LogReader);
SnapshotLogProcessor.prototype.processCodeCreation = function(
- type, kind, start, size, name, maybe_func) {
+ type, start, size, name, maybe_func) {
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
var state = parseState(maybe_func[1]);
@@ -156,7 +156,7 @@ function TickProcessor(
'shared-library': { parsers: [null, parseInt, parseInt],
processor: this.processSharedLibrary },
'code-creation': {
- parsers: [null, parseInt, parseInt, parseInt, null, 'var-args'],
+ parsers: [null, parseInt, parseInt, null, 'var-args'],
processor: this.processCodeCreation },
'code-move': { parsers: [parseInt, parseInt],
processor: this.processCodeMove },
@@ -167,7 +167,7 @@ function TickProcessor(
'snapshot-pos': { parsers: [parseInt, parseInt],
processor: this.processSnapshotPosition },
'tick': {
- parsers: [parseInt, parseInt, parseInt, parseInt,
+ parsers: [parseInt, parseInt, parseInt,
parseInt, parseInt, 'var-args'],
processor: this.processTick },
'heap-sample-begin': { parsers: [null, null, parseInt],
@@ -231,9 +231,8 @@ TickProcessor.VmStates = {
JS: 0,
GC: 1,
COMPILER: 2,
- PARALLEL_COMPILER: 3,
- OTHER: 4,
- EXTERNAL: 5
+ OTHER: 3,
+ EXTERNAL: 4
};
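
The parser tables above shrink in lockstep with the log fixture earlier in this diff: code-creation loses the leading code-kind integer, tick loses the ns_since_start column, and VmStates drops PARALLEL_COMPILER, renumbering OTHER and EXTERNAL. LogReader dispatches positionally, one parser per comma-separated field, so every parsers array and its processor signature must shed the same slot together, as the remaining hunks below do for processCodeCreation and processTick. An illustrative Python model of that dispatch, not the logreader.js implementation:

    def hexint(s):
        return int(s, 16)

    DISPATCH = {
        'code-creation': ([str, hexint, int, str],
                          lambda t, start, size, name:
                              print('code', t, hex(start), size, name)),
        'code-move':     ([hexint, hexint],
                          lambda src, dst: print('move', hex(src), hex(dst))),
    }

    def process(line):
        event, rest = line.split(',', 1)
        parsers, processor = DISPATCH[event]
        fields = rest.split(',')
        processor(*[p(f) for p, f in zip(parsers, fields)])

    process('code-creation,Stub,0xf540a100,474,"CEntryStub"')
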
@@ -309,7 +308,7 @@ TickProcessor.prototype.processSharedLibrary = function(
TickProcessor.prototype.processCodeCreation = function(
- type, kind, start, size, name, maybe_func) {
+ type, start, size, name, maybe_func) {
name = this.deserializedEntriesNames_[start] || name;
if (maybe_func.length) {
var funcAddr = parseInt(maybe_func[0]);
@@ -350,7 +349,6 @@ TickProcessor.prototype.includeTick = function(vmState) {
TickProcessor.prototype.processTick = function(pc,
sp,
- ns_since_start,
is_external_callback,
tos_or_external_callback,
vmState,